#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""End-to-end test for afdo_prof_analysis."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from datetime import date
import json
import os
import shutil
import tempfile
import unittest

import afdo_prof_analysis as analysis


class AfdoProfAnalysisE2ETest(unittest.TestCase):
  """Class for end-to-end testing of AFDO Profile Analysis"""

  # Nothing significant about the values; odd (good) vs. even (bad) is just
  # easier to remember.
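  # Each value is the body of a textual AFDO-style profile entry for its key:
  # the leading ':N' presumably carries the function's head sample count, and
  # each ' offset: count' line a per-line sample count. run_check() turns
  # these dicts into profile files via analysis.json_to_text().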
  good_prof = {
      'func_a': ':1\n 1: 3\n 3: 5\n 5: 7\n',
      'func_b': ':3\n 3: 5\n 5: 7\n 7: 9\n',
      'func_c': ':5\n 5: 7\n 7: 9\n 9: 11\n',
      'func_d': ':7\n 7: 9\n 9: 11\n 11: 13\n',
      'good_func_a': ':11\n',
      'good_func_b': ':13\n'
  }

  bad_prof = {
      'func_a': ':2\n 2: 4\n 4: 6\n 6: 8\n',
      'func_b': ':4\n 4: 6\n 6: 8\n 8: 10\n',
      'func_c': ':6\n 6: 8\n 8: 10\n 10: 12\n',
      'func_d': ':8\n 8: 10\n 10: 12\n 12: 14\n',
      'bad_func_a': ':12\n',
      'bad_func_b': ':14\n'
  }

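  # Expected analysis result (minus the run-specific 'seed' key popped in
  # run_check): 'good_only_functions'/'bad_only_functions' flag functions that
  # appear in only one profile, and 'bisect_results' lists individually-bad
  # functions plus any bad ranges found.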
  expected = {
      'good_only_functions': False,
      'bad_only_functions': True,
      'bisect_results': {
          'ranges': [],
          'individuals': ['func_a']
      }
  }

  def test_afdo_prof_analysis(self):
    # Individually-bad functions take precedence over bad ranges in our
    # algorithms, so that case should be caught first.
    good = self.good_prof.copy()
    bad = self.bad_prof.copy()
    self.run_check(good, bad, self.expected)

    # Now remove the individually-bad function and the bad-only functions, and
    # check that the bad range is caught instead.
    bad['func_a'] = good['func_a']
    bad.pop('bad_func_a')
    bad.pop('bad_func_b')

    expected_cp = self.expected.copy()
    expected_cp['bad_only_functions'] = False
    expected_cp['bisect_results'] = {
        'individuals': [],
        'ranges': [['func_b', 'func_c', 'func_d']]
    }

    self.run_check(good, bad, expected_cp)

  def test_afdo_prof_state(self):
    """Verifies that saved state is correct replication."""
    temp_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

    good = self.good_prof.copy()
    bad = self.bad_prof.copy()
    # Add many more functions so the bisection has enough work for saving and
    # resuming state to be meaningful.
    for x in range(400):
      good['func_%d' % x] = ''
      bad['func_%d' % x] = ''

    fd_first, first_result = tempfile.mkstemp(dir=temp_dir)
    os.close(fd_first)
    fd_state, state_file = tempfile.mkstemp(dir=temp_dir)
    os.close(fd_state)
    self.run_check(
        good,
        bad,
        self.expected,
        state_file=state_file,
        out_file=first_result)

    fd_second, second_result = tempfile.mkstemp(dir=temp_dir)
    os.close(fd_second)
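    # The analysis presumably renames its state file to
    # '<state_file>.completed.<date>' when a run finishes; resume the second
    # run from that file (no_resume=False) and expect identical results.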
    completed_state_file = '%s.completed.%s' % (state_file, str(date.today()))
    self.run_check(
        good,
        bad,
        self.expected,
        state_file=completed_state_file,
        no_resume=False,
        out_file=second_result)

    with open(first_result) as f:
      initial_run = json.load(f)
    with open(second_result) as f:
      loaded_run = json.load(f)
    self.assertEqual(initial_run, loaded_run)

  def test_exit_on_problem_status(self):
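    """Verifies an error is raised on a problem status from the decider."""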
    temp_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

    fd_state, state_file = tempfile.mkstemp(dir=temp_dir)
    os.close(fd_state)
    with self.assertRaises(RuntimeError):
      self.run_check(
          self.good_prof,
          self.bad_prof,
          self.expected,
          state_file=state_file,
          extern_decider='problemstatus_external.sh')

  def run_check(self,
                good_prof,
                bad_prof,
                expected,
                state_file=None,
                no_resume=True,
                out_file=None,
                extern_decider=None):
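    """Runs the analysis on the given profiles and checks the result.

    Writes good_prof and bad_prof to temporary files, points the analysis
    module's flags at them (plus the given state/output files and external
    decider script), runs analysis.main, and asserts the output (minus the
    run-specific 'seed') equals `expected`.
    """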
    temp_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

    good_prof_file = os.path.join(temp_dir, 'good_prof.txt')
    bad_prof_file = os.path.join(temp_dir, 'bad_prof.txt')
    good_prof_text = analysis.json_to_text(good_prof)
    bad_prof_text = analysis.json_to_text(bad_prof)
    with open(good_prof_file, 'w') as f:
      f.write(good_prof_text)
    with open(bad_prof_file, 'w') as f:
      f.write(bad_prof_text)

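    # Configure the analysis through its module-level FLAGS (presumably
    # absl-style flags) instead of building a command line.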
    analysis.FLAGS.good_prof = good_prof_file
    analysis.FLAGS.bad_prof = bad_prof_file
    if state_file:
      analysis.FLAGS.state_file = state_file
    analysis.FLAGS.no_resume = no_resume
    analysis.FLAGS.analysis_output_file = out_file or '/dev/null'

    dir_path = os.path.dirname(os.path.realpath(__file__))  # dir of this file
    external_script = os.path.join(dir_path,
                                   extern_decider or 'e2e_external.sh')
    analysis.FLAGS.external_decider = external_script

    actual = analysis.main(None)
    actual.pop('seed')  # Run-specific value; nothing to check.
    self.assertEqual(actual, expected)


if __name__ == '__main__':
  unittest.main()