path: root/user_activity_benchmarks/benchmark_metrics.py
author    Zhizhou Yang <zhizhouy@google.com>  2017-07-22 01:20:28 +0000
committer android-build-merger <android-build-merger@google.com>  2017-07-22 01:20:28 +0000
commit    ec1dfabf71cfe08796ba80ae03f4857b2797f5f9 (patch)
tree      bf139ee25415cecde142e95791edba3803b2452a /user_activity_benchmarks/benchmark_metrics.py
parent    3690e025de8daaed03c4acb02d2b054e5c4c0dd5 (diff)
parent    f1a93c225b92dc0059e7d7e2de7c7bd0a493e23d (diff)
Merge branch 'aosp/mirror-chromium-master' into update_utils am: 4307f4735e am: 18caef1edd am: 6c551e0d52 am: ddfea1f7e7
am: f1a93c225b Change-Id: I60d258ddb8fd0bf9632f1f70c5cdeb3078938059
Diffstat (limited to 'user_activity_benchmarks/benchmark_metrics.py')
-rw-r--r--  user_activity_benchmarks/benchmark_metrics.py  306
1 file changed, 0 insertions(+), 306 deletions(-)
diff --git a/user_activity_benchmarks/benchmark_metrics.py b/user_activity_benchmarks/benchmark_metrics.py
deleted file mode 100644
index 30ae31e0..00000000
--- a/user_activity_benchmarks/benchmark_metrics.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Computes the metrics for functions, Chrome OS components and benchmarks."""
-
-from collections import defaultdict
-
-
-def ComputeDistanceForFunction(child_functions_statistics_sample,
-                               child_functions_statistics_reference):
-  """Computes the distance metric for a function.
-
-  Args:
-    child_functions_statistics_sample: A dict that has as a key the name of a
-      function and as a value the inclusive count fraction. The keys are the
-      child functions of a sample parent function.
-    child_functions_statistics_reference: A dict that has as a key the name
-      of a function and as a value the inclusive count fraction. The keys are
-      the child functions of a reference parent function.
-
-  Returns:
-    A float value representing 1.0 plus the sum of the inclusive count
-    fraction differences of pairs of common child functions. If a child
-    function is present in a single data set, then we consider the missing
-    inclusive count fraction as 0. This value describes the difference in
-    behaviour between a sample and the reference parent function.
-  """
-  # We initialize the distance with 1.0 so that the score computation, which
-  # divides by the distance, can never divide by zero.
-  distance = 1.0
-
-  for child_function, inclusive_count_fraction_reference in \
-      child_functions_statistics_reference.iteritems():
-    inclusive_count_fraction_sample = \
-        child_functions_statistics_sample.get(child_function, 0.0)
-    distance += \
-        abs(inclusive_count_fraction_sample -
-            inclusive_count_fraction_reference)
-
-  for child_function, inclusive_count_fraction_sample in \
-      child_functions_statistics_sample.iteritems():
-    if child_function not in child_functions_statistics_reference:
-      distance += inclusive_count_fraction_sample
-
-  return distance
-
-
-def ComputeScoreForFunction(distance, reference_fraction, sample_fraction):
-  """Computes the score for a function.
-
-  Args:
-    distance: A float value representing the difference in behaviour between
-      the sample and the reference function.
-    reference_fraction: A float value representing the inclusive count
-      fraction of the reference function.
-    sample_fraction: A float value representing the inclusive count fraction
-      of the sample function.
-
-  Returns:
-    A float value representing the score of the function.
-  """
-  return reference_fraction * sample_fraction / distance
-
-
-def ComputeMetricsForComponents(cwp_function_groups, function_metrics):
-  """Computes the metrics for a set of Chrome OS components.
-
-  For every Chrome OS group, we compute the number of functions matching the
-  group, and the cumulative and average distance and score of all those
-  functions. A function matches a group if the path of the file containing
-  its definition contains the common path describing the group.
-
-  Args:
-    cwp_function_groups: A list of tuples containing the name of the group
-      and the common path describing the group.
-    function_metrics: A dict having as a key the name of the function and the
-      name of the file where it is declared concatenated by a ',', and as a
-      value a tuple containing the distance and the score metrics.
-
-  Returns:
-    A dict containing as a key the name of the group and as a value a tuple
-    with the group file path, the number of functions matching the group,
-    the cumulative and average distance, and the cumulative and average
-    score of all those functions.
-  """
-  function_groups_metrics = defaultdict(lambda: (0, 0.0, 0.0, 0.0, 0.0))
-
-  for function_key, metric in function_metrics.iteritems():
-    _, function_file = function_key.split(',')
-
-    for group, common_path in cwp_function_groups:
-      if common_path not in function_file:
-        continue
-
-      function_distance = metric[0]
-      function_score = metric[1]
-      group_statistic = function_groups_metrics[group]
-
-      function_count = group_statistic[1] + 1
-      function_distance_cum = function_distance + group_statistic[2]
-      function_distance_avg = function_distance_cum / float(function_count)
-      function_score_cum = function_score + group_statistic[4]
-      function_score_avg = function_score_cum / float(function_count)
-
-      function_groups_metrics[group] = (common_path,
-                                        function_count,
-                                        function_distance_cum,
-                                        function_distance_avg,
-                                        function_score_cum,
-                                        function_score_avg)
-      break
-
-  return function_groups_metrics
-
-
-def ComputeMetricsForBenchmark(function_metrics):
-  """Computes the aggregated metrics for all the functions of a benchmark.
-
-  Args:
-    function_metrics: A dict having as a key the name of the function and the
-      name of the file where it is declared concatenated by a ',', and as a
-      value a tuple containing the distance and the score metrics.
-
-  Returns:
-    A tuple containing the function count, the cumulative and average
-    distance, and the cumulative and average score of all the functions.
-  """
-  function_count = len(function_metrics)
-  distance_cum = 0.0
-  score_cum = 0.0
-
-  for distance, score in function_metrics.values():
-    distance_cum += distance
-    score_cum += score
-
-  distance_avg = distance_cum / float(function_count)
-  score_avg = score_cum / float(function_count)
-  return function_count, distance_cum, distance_avg, score_cum, score_avg
-
-
-def ComputeFunctionCountForBenchmarkSet(set_function_metrics, cwp_functions,
-                                        metric_string):
-  """Computes the function count metric pair for the benchmark set.
-
-  For the function count metric, we count the unique functions covered by
-  the set of benchmarks and compute the fraction of unique functions out of
-  the total number of CWP functions given.
-
-  We also compute the same metric pair for every group from the keys of the
-  set_function_metrics dict.
-
-  Args:
-    set_function_metrics: A list of dicts having as a key the name of a group
-      and as a value a list of functions that match the given group.
-    cwp_functions: A dict having as a key the name of the groups and as a
-      value the list of CWP functions that match an individual group.
-    metric_string: A tuple of strings that will be mapped to the tuple of
-      metric values in the returned function group dict. This is done for
-      the convenience of the JSON output.
-
-  Returns:
-    A tuple with the metric pair and a dict with the group names and values
-    of the metric pair. The first value of the metric pair represents the
-    function count and the second value the function count fraction.
-    The dict has as a key the name of the group and as a value a dict that
-    maps the metric_string to the values of the metric pair of the group.
-  """
-  cwp_functions_count = sum(len(functions)
-                            for functions in cwp_functions.itervalues())
-  set_groups_functions = defaultdict(set)
-  for benchmark_function_metrics in set_function_metrics:
-    for group_name in benchmark_function_metrics:
-      set_groups_functions[group_name] |= \
-          set(benchmark_function_metrics[group_name])
-
-  set_groups_functions_count = {}
-  set_functions_count = 0
-  for group_name, functions in set_groups_functions.iteritems():
-    set_group_functions_count = len(functions)
-    if group_name in cwp_functions:
-      set_groups_functions_count[group_name] = {
-          metric_string[0]: set_group_functions_count,
-          metric_string[1]: set_group_functions_count /
-              float(len(cwp_functions[group_name]))
-      }
-    else:
-      set_groups_functions_count[group_name] = \
-          {metric_string[0]: set_group_functions_count, metric_string[1]: 0.0}
-    set_functions_count += set_group_functions_count
-
-  set_functions_count_fraction = \
-      set_functions_count / float(cwp_functions_count)
-  return (set_functions_count, set_functions_count_fraction), \
-      set_groups_functions_count
-
-
-def ComputeDistanceForBenchmarkSet(set_function_metrics, cwp_functions,
-                                   metric_string):
-  """Computes the distance variation metric pair for the benchmark set.
-
-  For the distance variation metric, we compute the sum of the distance
-  variations of the functions covered by a set of benchmarks. We define the
-  distance variation as the difference between the distance value of a
-  function and the ideal distance value (1.0). If a function appears in
-  multiple common functions files, we consider only its minimum distance.
-  We also compute the distance variation per function.
-
-  In addition, we compute the same metric pair for every group from the keys
-  of the set_function_metrics dict.
-
-  Args:
-    set_function_metrics: A list of dicts having as a key the name of a group
-      and as a value a list of functions that match the given group.
-    cwp_functions: A dict having as a key the name of the groups and as a
-      value the list of CWP functions that match an individual group. It is
-      not used by this function.
-    metric_string: A tuple of strings that will be mapped to the tuple of
-      metric values in the returned function group dict. This is done for
-      the convenience of the JSON output.
-
-  Returns:
-    A tuple with the metric pair and a dict with the group names and values
-    of the metric pair. The first value of the metric pair is the distance
-    variation per function and the second value is the total distance
-    variation. The dict has as a key the name of the group and as a value a
-    dict that maps the metric_string to the values of the metric pair of the
-    group.
-  """
-  set_unique_functions = defaultdict(lambda: defaultdict(lambda: float('inf')))
-  set_function_count = 0
-  total_distance_variation = 0.0
-
-  for benchmark_function_metrics in set_function_metrics:
-    for group_name in benchmark_function_metrics:
-      for function_key, metrics in \
-          benchmark_function_metrics[group_name].iteritems():
-        previous_distance = set_unique_functions[group_name][function_key]
-        set_unique_functions[group_name][function_key] = \
-            min(metrics[0], previous_distance)
-
-  groups_distance_variations = defaultdict(lambda: (0.0, 0.0))
-
-  for group_name, functions_distances in set_unique_functions.iteritems():
-    group_function_count = len(functions_distances)
-    group_distance_variation = \
-        sum(functions_distances.itervalues()) - float(group_function_count)
-    total_distance_variation += group_distance_variation
-    set_function_count += group_function_count
-    groups_distance_variations[group_name] = {
-        metric_string[0]:
-            group_distance_variation / float(group_function_count),
-        metric_string[1]: group_distance_variation
-    }
-
-  return (total_distance_variation / set_function_count,
-          total_distance_variation), groups_distance_variations
-
-
-def ComputeScoreForBenchmarkSet(set_function_metrics, cwp_functions,
-                                metric_string):
-  """Computes the score metric pair for the benchmark set.
-
-  For the score metric, we compute the sum of the scores of the functions
-  from a set of benchmarks. If a function appears in multiple common
-  functions files, we consider only its maximum score. We also compute the
-  fraction of this sum out of the ideal sum of the scores of all the given
-  CWP functions; since the ideal score of a function is 1.0, the ideal sum
-  equals the total number of CWP functions.
-
-  In addition, we compute the same metric pair for every group from the
-  keys of the set_function_metrics dict.
-
-  Args:
-    set_function_metrics: A list of dicts having as a key the name of a group
-      and as a value a list of functions that match the given group.
-    cwp_functions: A dict having as a key the name of the groups and as a
-      value the list of CWP functions that match an individual group.
-    metric_string: A tuple of strings that will be mapped to the tuple of
-      metric values in the returned function group dict. This is done for
-      the convenience of the JSON output.
-
-  Returns:
-    A tuple with the metric pair and a dict with the group names and values
-    of the metric pair. The first value of the metric pair is the fraction
-    of the ideal sum of the scores and the second value is the sum of the
-    scores of the functions. The dict has as a key the name of the group and
-    as a value a dict that maps the metric_string to the values of the
-    metric pair of the group.
-  """
-  cwp_functions_count = sum(len(functions)
-                            for functions in cwp_functions.itervalues())
-  set_unique_functions = defaultdict(lambda: defaultdict(lambda: 0.0))
-  total_score = 0.0
-
-  for benchmark_function_metrics in set_function_metrics:
-    for group_name in benchmark_function_metrics:
-      for function_key, metrics in \
-          benchmark_function_metrics[group_name].iteritems():
-        previous_score = set_unique_functions[group_name][function_key]
-        set_unique_functions[group_name][function_key] = \
-            max(metrics[1], previous_score)
-
-  groups_scores = defaultdict(lambda: (0.0, 0.0))
-
-  for group_name, function_scores in set_unique_functions.iteritems():
-    group_function_count = float(len(cwp_functions[group_name]))
-    group_score = sum(function_scores.itervalues())
-    total_score += group_score
-    groups_scores[group_name] = {
-        metric_string[0]: group_score / group_function_count,
-        metric_string[1]: group_score
-    }
-
-  return (total_score / cwp_functions_count, total_score), groups_scores
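
For context, here is a minimal sketch of how the per-function metrics defined in the deleted file fit together and how they roll up into the benchmark-level aggregate. It is not part of the original commit: the helper names, the toy child-function names (memcpy, malloc, free) and all the numbers are illustrative, and the logic mirrors ComputeDistanceForFunction, ComputeScoreForFunction and ComputeMetricsForBenchmark in Python 3 style (the deleted file itself targets Python 2, hence iteritems/itervalues).

def distance_for_function(children_sample, children_reference):
  # Mirrors ComputeDistanceForFunction: a 1.0 offset plus the sum of the
  # absolute differences of the inclusive count fractions; a child present
  # on only one side contributes its whole fraction.
  distance = 1.0
  for child, reference_fraction in children_reference.items():
    distance += abs(children_sample.get(child, 0.0) - reference_fraction)
  for child, sample_fraction in children_sample.items():
    if child not in children_reference:
      distance += sample_fraction
  return distance


def score_for_function(distance, reference_fraction, sample_fraction):
  # Mirrors ComputeScoreForFunction.
  return reference_fraction * sample_fraction / distance


# Toy profiles: child function name -> inclusive count fraction.
sample = {'memcpy': 0.4, 'malloc': 0.2}
reference = {'memcpy': 0.5, 'free': 0.3}

distance = distance_for_function(sample, reference)
# 1.0 + |0.4 - 0.5| + |0.0 - 0.3| + 0.2 = 1.6
score = score_for_function(distance, 0.8, 0.7)
# 0.8 * 0.7 / 1.6 = 0.35

# Benchmark-level roll-up, mirroring ComputeMetricsForBenchmark: keys are
# 'function,file' strings and values are (distance, score) tuples.
function_metrics = {'foo,/a/b.cc': (1.6, 0.35), 'bar,/a/c.cc': (1.2, 0.5)}
count = len(function_metrics)
distance_cum = sum(m[0] for m in function_metrics.values())  # 2.8
score_cum = sum(m[1] for m in function_metrics.values())     # 0.85
print(count, distance_cum / count, score_cum / count)  # roughly 2 1.4 0.425

The benchmark-set variants apply the same idea across groups, keeping the minimum distance and the maximum score when a function occurs in several common functions files.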