#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Tests for afdo_prof_analysis."""


import io
import random
import unittest

from afdo_tools.bisection import afdo_prof_analysis as analysis


class AfdoProfAnalysisTest(unittest.TestCase):
    """Class for testing AFDO Profile Analysis"""

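    # Minimal AFDO-like profiles keyed by function name: func_a and func_b
    # differ between the two, func_c is bad-only, func_d is good-only. The
    # tests below key on these differences.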
    bad_items = {"func_a": "1", "func_b": "3", "func_c": "5"}
    good_items = {"func_a": "2", "func_b": "4", "func_d": "5"}
    random.seed(13)  # 13 is arbitrary; a fixed seed keeps tests reproducible
    # add some extra info to make tests more reflective of real scenario
    for num in range(128):
        func_name = "func_extra_%d" % num
        # ~1/3 to both, ~1/3 only to bad, ~1/3 only to good
        rand_val = random.randint(1, 101)
        if rand_val < 67:
            bad_items[func_name] = "test_data"
        if rand_val < 34 or rand_val >= 67:
            good_items[func_name] = "test_data"

    analysis.random.seed(5)  # 5 is arbitrary; keeps the module's RNG stable

    def test_text_to_json(self):
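        """text_to_json should map each function name to its profile body."""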
        test_data = io.StringIO(
            "deflate_slow:87460059:3\n"
            " 3: 24\n"
            " 14: 54767\n"
            " 15: 664 fill_window:22\n"
            " 16: 661\n"
            " 19: 637\n"
            " 41: 36692 longest_match:36863\n"
            " 44: 36692\n"
            " 44.2: 5861\n"
            " 46: 13942\n"
            " 46.1: 14003\n"
        )
        expected = {
            "deflate_slow": ":87460059:3\n"
            " 3: 24\n"
            " 14: 54767\n"
            " 15: 664 fill_window:22\n"
            " 16: 661\n"
            " 19: 637\n"
            " 41: 36692 longest_match:36863\n"
            " 44: 36692\n"
            " 44.2: 5861\n"
            " 46: 13942\n"
            " 46.1: 14003\n"
        }
        actual = analysis.text_to_json(test_data)
        self.assertEqual(actual, expected)
        test_data.close()

    def test_text_to_json_empty_afdo(self):
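        """An empty AFDO profile should parse to an empty dict."""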
        expected = {}
        actual = analysis.text_to_json("")
        self.assertEqual(actual, expected)

    def test_json_to_text(self):
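        """json_to_text should rejoin function names with their bodies."""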
        example_prof = {"func_a": ":1\ndata\n", "func_b": ":2\nmore data\n"}
        expected_text = "func_a:1\ndata\nfunc_b:2\nmore data\n"
        self.assertEqual(analysis.json_to_text(example_prof), expected_text)

    def test_bisect_profiles(self):
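        """Bisection should flag func_a and func_b as individually bad."""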

        # Mock run of the external decider script, keyed on arbitrarily
        # chosen bad-profile values. save_run is accepted but unused
        # because afdo_prof_analysis.py always passes it explicitly.
        # pylint: disable=unused-argument
        class DeciderClass(object):
            """Class for this tests's decider."""

            def run(self, prof, save_run=False):
                if "1" in prof["func_a"] or "3" in prof["func_b"]:
                    return analysis.StatusEnum.BAD_STATUS
                return analysis.StatusEnum.GOOD_STATUS

        results = analysis.bisect_profiles_wrapper(
            DeciderClass(), self.good_items, self.bad_items
        )
        self.assertEqual(results["individuals"], sorted(["func_a", "func_b"]))
        self.assertEqual(results["ranges"], [])

    def test_range_search(self):
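        """range_search should find the bad pair split across the range."""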

        # Arbitrarily chosen functions whose values in the bad profile
        # together constitute a problematic pair.
        # pylint: disable=unused-argument
        class DeciderClass(object):
            """Class for this tests's decider."""

            def run(self, prof, save_run=False):
                if "1" in prof["func_a"] and "3" in prof["func_b"]:
                    return analysis.StatusEnum.BAD_STATUS
                return analysis.StatusEnum.GOOD_STATUS

        # Put the problematic pair in separate halves of the common funcs so
        # that the non-bisecting search is exercised as in its real use case.
        common_funcs = [
            func for func in self.good_items if func in self.bad_items
        ]
        common_funcs.remove("func_a")
        common_funcs.insert(0, "func_a")
        common_funcs.remove("func_b")
        common_funcs.append("func_b")

        problem_range = analysis.range_search(
            DeciderClass(),
            self.good_items,
            self.bad_items,
            common_funcs,
            0,
            len(common_funcs),
        )

        self.assertEqual(["func_a", "func_b"], problem_range)

    def test_check_good_not_bad(self):
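        """check_good_not_bad should hold for these good/bad profiles."""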
        func_in_good = "func_c"

        # pylint: disable=unused-argument
        class DeciderClass(object):
            """Class for this tests's decider."""

            def run(self, prof, save_run=False):
                if func_in_good in prof:
                    return analysis.StatusEnum.GOOD_STATUS
                return analysis.StatusEnum.BAD_STATUS

        self.assertTrue(
            analysis.check_good_not_bad(
                DeciderClass(), self.good_items, self.bad_items
            )
        )

    def test_check_bad_not_good(self):
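        """check_bad_not_good should hold for these good/bad profiles."""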
        func_in_bad = "func_d"

        # pylint: disable=unused-argument
        class DeciderClass(object):
            """Class for this tests's decider."""

            def run(self, prof, save_run=False):
                if func_in_bad in prof:
                    return analysis.StatusEnum.BAD_STATUS
                return analysis.StatusEnum.GOOD_STATUS

        self.assertTrue(
            analysis.check_bad_not_good(
                DeciderClass(), self.good_items, self.bad_items
            )
        )


if __name__ == "__main__":
    unittest.main()