author    Evelina Dumitrescu <evelinad@google.com>  2016-09-12 23:27:37 -0700
committer chrome-bot <chrome-bot@chromium.org>  2017-02-01 18:13:43 -0800
commit    77abf01f2cff8bb3a5564f554d9218c085b83d65 (patch)
tree      78ce3e8a4b517d786a56c8d9e0dcf49359951819
parent    daddec971693271ea429fce9026adc33dc71bcb6 (diff)
user_activity: Added implementation for the metrics.
For every function, we compute a distance based on the sum of the inclusive
count fraction differences of the pairs of common child functions from the
sample and reference function sets. For every function, we assign a score
based on the distance and on its inclusive count fractions in the sample and
reference function sets. For every Chrome OS component, we compute a set of
metrics consisting of the number of functions matching the group, the average
and cumulative distance, and the total score of those functions.

BUG=None
TEST=benchmark_metrics_unittest passes

Change-Id: I7810ec86bc4fa488b49ee18de3b3bda2ef95ba09
Reviewed-on: https://chrome-internal-review.googlesource.com/286161
Reviewed-by: Luis Lozano <llozano@chromium.org>
Tested-by: Evelina Dumitrescu <evelinad@google.com>
Reviewed-on: https://chromium-review.googlesource.com/435910
Commit-Ready: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
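To make the distance and score concrete, here is a toy sketch (not part of
the patch; the child-function names and the fractions below are invented).
The distance is 1 plus the sum of the absolute differences of the inclusive
count fractions of the child functions, and the score is
reference_fraction * sample_fraction / distance:

# Toy illustration of the metrics, with invented child functions/fractions.
sample_children = {'foo': 0.3, 'bar': 0.2}     # child -> inclusive fraction
reference_children = {'foo': 0.1, 'baz': 0.4}  # same, for the reference set

distance = 1.0  # starts at 1.0 so the score division can never hit zero
for child, ref_fraction in reference_children.items():
  distance += abs(sample_children.get(child, 0.0) - ref_fraction)
for child, sample_fraction in sample_children.items():
  if child not in reference_children:
    distance += sample_fraction
# distance = 1.0 + |0.3 - 0.1| + 0.4 + 0.2 = 1.8

score = 0.5 * 0.6 / distance  # hypothetical reference/sample fractions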
-rw-r--r--  user_activity_benchmarks/benchmark_metrics.py | 138
1 file changed, 138 insertions(+), 0 deletions(-)
diff --git a/user_activity_benchmarks/benchmark_metrics.py b/user_activity_benchmarks/benchmark_metrics.py
new file mode 100644
index 00000000..ff029a8a
--- /dev/null
+++ b/user_activity_benchmarks/benchmark_metrics.py
@@ -0,0 +1,138 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Computes the metrics for functions, Chrome OS components and benchmarks."""
+
+import collections
+
+
+def ComputeDistanceForFunction(child_functions_statistics_sample,
+                               child_functions_statistics_reference):
+  """Computes the distance metric for a function.
+
+  Args:
+    child_functions_statistics_sample: A dict that has as a key the name of a
+      function and as a value the inclusive count fraction. The keys are the
+      child functions of a sample parent function.
+    child_functions_statistics_reference: A dict that has as a key the name
+      of a function and as a value the inclusive count fraction. The keys are
+      the child functions of a reference parent function.
+  Returns:
+    A float value representing the sum of the inclusive count fraction
+    differences of the pairs of common child functions. If a child function
+    is present in only one data set, we consider its missing inclusive count
+    fraction to be 0. This value describes the difference in behaviour
+    between the sample and the reference parent function.
+  """
+  # Initialize the distance with 1.0 to avoid a division by zero in the
+  # score computation, which uses the distance as a denominator.
+  distance = 1.0
+
+  for child_function, inclusive_count_fraction_reference in \
+      child_functions_statistics_reference.iteritems():
+    inclusive_count_fraction_sample = \
+        child_functions_statistics_sample.get(child_function, 0.0)
+    distance += abs(inclusive_count_fraction_sample -
+                    inclusive_count_fraction_reference)
+
+  for child_function, inclusive_count_fraction_sample in \
+      child_functions_statistics_sample.iteritems():
+    if child_function not in child_functions_statistics_reference:
+      distance += inclusive_count_fraction_sample
+
+  return distance
+
+
+def ComputeScoreForFunction(distance, reference_fraction, sample_fraction):
+  """Computes the score for a function.
+
+  Args:
+    distance: A float value representing the difference in behaviour between
+      the sample and the reference function.
+    reference_fraction: A float value representing the inclusive count
+      fraction of the reference function.
+    sample_fraction: A float value representing the inclusive count fraction
+      of the sample function.
+  Returns:
+    A float value representing the score of the function.
+  """
+  return reference_fraction * sample_fraction / distance
+
+
+def ComputeMetricsForComponents(cwp_function_groups, function_metrics):
+  """Computes the metrics for a set of Chrome OS components.
+
+  For every Chrome OS group, we compute the number of functions matching the
+  group, and the cumulative and average distance and score of all those
+  functions. A function matches a group if the path of the file containing
+  its definition contains the common path describing the group.
+
+  Args:
+    cwp_function_groups: A dict having as a key the name of the group and as
+      a value a common path describing the group.
+    function_metrics: A dict having as a key the name of the function and the
+      name of the file where it is defined, separated by a ',', and as a
+      value a tuple containing the distance and the score metrics.
+  Returns:
+    A dict containing as a key the name of the group and as a value a tuple
+    with the common path of the group, the number of functions matching the
+    group, the cumulative and average distance, and the cumulative and
+    average score of all those functions.
+  """
+  function_groups_metrics = \
+      collections.defaultdict(lambda: ('', 0, 0.0, 0.0, 0.0, 0.0))
+
+  for function_key, metric in function_metrics.iteritems():
+    function, function_file = function_key.split(',')
+
+    for group, common_path in cwp_function_groups.iteritems():
+      if common_path not in function_file:
+        continue
+
+      function_distance, function_score = metric
+      group_statistic = function_groups_metrics[group]
+
+      function_count = group_statistic[1] + 1
+      function_distance_cum = function_distance + group_statistic[2]
+      function_distance_avg = function_distance_cum / float(function_count)
+      function_score_cum = function_score + group_statistic[4]
+      function_score_avg = function_score_cum / float(function_count)
+
+      function_groups_metrics[group] = (common_path,
+                                        function_count,
+                                        function_distance_cum,
+                                        function_distance_avg,
+                                        function_score_cum,
+                                        function_score_avg)
+      break
+
+  return function_groups_metrics
+
+
+def ComputeMetricsForBenchmark(function_metrics):
+  """Computes the aggregated metrics for a benchmark.
+
+  Args:
+    function_metrics: A dict having as a key the name of the function and the
+      name of the file where it is defined, separated by a ',', and as a
+      value a tuple containing the distance and the score metrics.
+  Returns:
+    A tuple containing the number of functions, the cumulative and average
+    distance, and the cumulative and average score of all the functions.
+  """
+  function_count = len(function_metrics)
+  distance_cum = 0.0
+  score_cum = 0.0
+
+  for distance, score in function_metrics.values():
+    distance_cum += distance
+    score_cum += score
+
+  distance_avg = distance_cum / float(function_count)
+  score_avg = score_cum / float(function_count)
+  return function_count, distance_cum, distance_avg, score_cum, score_avg
+
+
+def ComputeMetricsForBenchmarkSet(benchmark_set_function_metrics,
+                                  cwp_function_groups):
+  """TODO(evelinad): Add the computation of the metrics for a set of
+  benchmarks.
+  """
+  raise NotImplementedError()
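A minimal usage sketch of how the pieces compose (hypothetical inputs: the
function key, file path, group name and fractions are all invented; note the
module as written targets Python 2 because of iteritems):

import benchmark_metrics

# Distance and score for a single function, from its child-function
# inclusive count fractions in the sample and reference profiles.
distance = benchmark_metrics.ComputeDistanceForFunction(
    {'Alloc': 0.2}, {'Alloc': 0.25, 'Free': 0.05})
score = benchmark_metrics.ComputeScoreForFunction(distance, 0.4, 0.3)

# Aggregate the per-function metrics into group and benchmark metrics.
function_metrics = {'MallocInternal,/usr/lib/malloc/malloc.c':
                        (distance, score)}
cwp_function_groups = {'lib': '/usr/lib'}
group_metrics = benchmark_metrics.ComputeMetricsForComponents(
    cwp_function_groups, function_metrics)
count, dist_cum, dist_avg, score_cum, score_avg = \
    benchmark_metrics.ComputeMetricsForBenchmark(function_metrics)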