author    Ting-Yuan Huang <laszio@chromium.org>  2017-03-16 11:29:38 -0700
committer chrome-bot <chrome-bot@chromium.org>   2017-03-16 18:37:41 -0700
commit    20862346b0916135b49bc9be1f3011167c8888ab (patch)
tree      3b0399f3a72894cb96fd76215b7c3db9d78b877e
parent    92bf473b1812c922babc0fd2fdd2702f267f8315 (diff)
download  toolchain-utils-20862346b0916135b49bc9be1f3011167c8888ab.tar.gz
Remove user_activity_benchmarks.
It's going to be hosted in CWP.

BUG=None
TEST=None

Change-Id: I16374dd23e6871f4cf3ab5665b6f7b71336175b1
Reviewed-on: https://chromium-review.googlesource.com/456517
Commit-Ready: Ting-Yuan Huang <laszio@chromium.org>
Tested-by: Ting-Yuan Huang <laszio@chromium.org>
Reviewed-by: Luis Lozano <llozano@chromium.org>
-rw-r--r--  user_activity_benchmarks/benchmark_metrics.py | 306
-rwxr-xr-x  user_activity_benchmarks/benchmark_metrics_experiment.py | 233
-rwxr-xr-x  user_activity_benchmarks/benchmark_metrics_experiment_unittest.py | 79
-rwxr-xr-x  user_activity_benchmarks/benchmark_metrics_unittest.py | 87
-rwxr-xr-x  user_activity_benchmarks/collect_experiment_data.sh | 93
-rwxr-xr-x  user_activity_benchmarks/collect_experiment_data_odd_even_session.sh | 95
-rwxr-xr-x  user_activity_benchmarks/collect_pprof_data.sh | 41
-rwxr-xr-x  user_activity_benchmarks/collect_telemetry_profiles.sh | 90
-rw-r--r--  user_activity_benchmarks/cwp_hot_functions_groups.txt | 314
-rwxr-xr-x  user_activity_benchmarks/process_hot_functions.py | 482
-rwxr-xr-x  user_activity_benchmarks/process_hot_functions_unittest.py | 223
-rw-r--r--  user_activity_benchmarks/select_hot_functions.sql | 27
-rwxr-xr-x  user_activity_benchmarks/select_optimal_benchmark_set.py | 347
-rwxr-xr-x  user_activity_benchmarks/symbolize_profiles.sh | 32
-rw-r--r--  user_activity_benchmarks/telemetry_benchmarks_R52_8350.68 | 113
-rw-r--r--  user_activity_benchmarks/testdata/expected/pprof_common/file1.pprof | 3
-rw-r--r--  user_activity_benchmarks/testdata/expected/pprof_common/file2.pprof | 2
-rw-r--r--  user_activity_benchmarks/testdata/expected/pprof_common/file3.pprof | 4
-rw-r--r--  user_activity_benchmarks/testdata/input/cwp_function_groups.txt | 3
-rw-r--r--  user_activity_benchmarks/testdata/input/cwp_functions_file.csv | 38
-rw-r--r--  user_activity_benchmarks/testdata/input/inclusive_count_reference.csv | 8
-rw-r--r--  user_activity_benchmarks/testdata/input/inclusive_count_test.csv | 8
-rw-r--r--  user_activity_benchmarks/testdata/input/pairwise_inclusive_count_reference.csv | 5
-rw-r--r--  user_activity_benchmarks/testdata/input/pairwise_inclusive_count_test.csv | 6
-rw-r--r--  user_activity_benchmarks/testdata/input/parse_cwp_statistics.csv | 6
-rw-r--r--  user_activity_benchmarks/testdata/input/pprof_top/file1.pprof | 20
-rw-r--r--  user_activity_benchmarks/testdata/input/pprof_top/file2.pprof | 17
-rw-r--r--  user_activity_benchmarks/testdata/input/pprof_top/file3.pprof | 21
-rw-r--r--  user_activity_benchmarks/testdata/input/pprof_top_csv/file1.csv | 15
-rw-r--r--  user_activity_benchmarks/testdata/input/pprof_tree/file1.pprof | 29
-rw-r--r--  user_activity_benchmarks/testdata/input/pprof_tree_csv/file1.csv | 6
-rw-r--r--  user_activity_benchmarks/testdata/results/pprof_common/file1.pprof | 3
-rw-r--r--  user_activity_benchmarks/testdata/results/pprof_common/file2.pprof | 2
-rw-r--r--  user_activity_benchmarks/testdata/results/pprof_common/file3.pprof | 4
-rw-r--r--  user_activity_benchmarks/utils.py | 402
-rwxr-xr-x  user_activity_benchmarks/utils_unittest.py | 133
36 files changed, 0 insertions, 3297 deletions
diff --git a/user_activity_benchmarks/benchmark_metrics.py b/user_activity_benchmarks/benchmark_metrics.py
deleted file mode 100644
index 30ae31e0..00000000
--- a/user_activity_benchmarks/benchmark_metrics.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Computes the metrics for functions, Chrome OS components and benchmarks."""
-
-from collections import defaultdict
-
-
-def ComputeDistanceForFunction(child_functions_statistics_sample,
- child_functions_statistics_reference):
- """Computes the distance metric for a function.
-
- Args:
- child_functions_statistics_sample: A dict that has as a key the name of a
- function and as a value the inclusive count fraction. The keys are
- the child functions of a sample parent function.
- child_functions_statistics_reference: A dict that has as a key the name of
- a function and as a value the inclusive count fraction. The keys are
- the child functions of a reference parent function.
-
- Returns:
- A float value representing the sum of inclusive count fraction
- differences of pairs of common child functions. If a child function is
- present in a single data set, then we consider the missing inclusive
- count fraction as 0. This value describes the difference in behaviour
- between a sample and the reference parent function.
- """
- # We initialize the distance with a small value to avoid the further
- # division by zero.
- distance = 1.0
-
- for child_function, inclusive_count_fraction_reference in \
- child_functions_statistics_reference.iteritems():
- inclusive_count_fraction_sample = 0.0
-
- if child_function in child_functions_statistics_sample:
- inclusive_count_fraction_sample = \
- child_functions_statistics_sample[child_function]
- distance += \
- abs(inclusive_count_fraction_sample -
- inclusive_count_fraction_reference)
-
- for child_function, inclusive_count_fraction_sample in \
- child_functions_statistics_sample.iteritems():
- if child_function not in child_functions_statistics_reference:
- distance += inclusive_count_fraction_sample
-
- return distance
-
-
-def ComputeScoreForFunction(distance, reference_fraction, sample_fraction):
- """Computes the score for a function.
-
- Args:
- distance: A float value representing the difference in behaviour between
- the sample and the reference function.
- reference_fraction: A float value representing the inclusive count
- fraction of the reference function.
- sample_fraction: A float value representing the inclusive count
- fraction of the sample function.
-
- Returns:
- A float value representing the score of the function.
- """
- return reference_fraction * sample_fraction / distance
-
-
-def ComputeMetricsForComponents(cwp_function_groups, function_metrics):
- """Computes the metrics for a set of Chrome OS components.
-
- For every Chrome OS group, we compute the number of functions matching the
- group, the cumulative and average score, the cumulative and average distance
- of all those functions. A function matches a group if the path of the file
- containing its definition contains the common path describing the group.
-
- Args:
- cwp_function_groups: A dict having as a key the name of the group and as a
- value a common path describing the group.
- function_metrics: A dict having as a key the name of the function and the
- name of the file where it is declared concatenated by a ',', and as a
- value a tuple containing the distance and the score metrics.
-
- Returns:
- A dict containing as a key the name of the group and as a value a tuple
- with the group file path, the number of functions matching the group,
- the cumulative and average score, cumulative and average distance of all
- those functions.
- """
- function_groups_metrics = defaultdict(lambda: (0, 0.0, 0.0, 0.0, 0.0))
-
- for function_key, metric in function_metrics.iteritems():
- _, function_file = function_key.split(',')
-
- for group, common_path in cwp_function_groups:
- if common_path not in function_file:
- continue
-
- function_distance = metric[0]
- function_score = metric[1]
- group_statistic = function_groups_metrics[group]
-
- function_count = group_statistic[1] + 1
- function_distance_cum = function_distance + group_statistic[2]
- function_distance_avg = function_distance_cum / float(function_count)
- function_score_cum = function_score + group_statistic[4]
- function_score_avg = function_score_cum / float(function_count)
-
- function_groups_metrics[group] = \
- (common_path,
- function_count,
- function_distance_cum,
- function_distance_avg,
- function_score_cum,
- function_score_avg)
- break
-
- return function_groups_metrics
-
-
-def ComputeMetricsForBenchmark(function_metrics):
- function_count = len(function_metrics.keys())
- distance_cum = 0.0
- distance_avg = 0.0
- score_cum = 0.0
- score_avg = 0.0
-
- for distance, score in function_metrics.values():
- distance_cum += distance
- score_cum += score
-
- distance_avg = distance_cum / float(function_count)
- score_avg = score_cum / float(function_count)
- return function_count, distance_cum, distance_avg, score_cum, score_avg
-
-
-def ComputeFunctionCountForBenchmarkSet(set_function_metrics, cwp_functions,
- metric_string):
- """Computes the function count metric pair for the benchmark set.
-
- For the function count metric, we count the unique functions covered by the
- set of benchmarks. We compute the fraction of unique functions out
- of the amount of CWP functions given.
-
- We compute also the same metric pair for every group from the keys of the
- set_function_metrics dict.
-
- Args:
- set_function_metrics: A list of dicts having as a key the name of a group
- and as value a list of functions that match the given group.
- cwp_functions: A dict having as a key the name of the groups and as a value
- the list of CWP functions that match an individual group.
- metric_string: A tuple of strings that will be mapped to the tuple of metric
- values in the returned function group dict. This is done for convenience
- for the JSON output.
-
- Returns:
- A tuple with the metric pair and a dict with the group names and values
- of the metric pair. The first value of the metric pair represents the
- function count and the second value the function count fraction.
- The dict has as a key the name of the group and as a value a dict that
- maps the metric_string to the values of the metric pair of the group.
- """
- cwp_functions_count = sum(len(functions)
- for functions in cwp_functions.itervalues())
- set_groups_functions = defaultdict(set)
- for benchmark_function_metrics in set_function_metrics:
- for group_name in benchmark_function_metrics:
- set_groups_functions[group_name] |= \
- set(benchmark_function_metrics[group_name])
-
- set_groups_functions_count = {}
- set_functions_count = 0
- for group_name, functions \
- in set_groups_functions.iteritems():
- set_group_functions_count = len(functions)
- if group_name in cwp_functions:
- set_groups_functions_count[group_name] = {
- metric_string[0]: set_group_functions_count,
- metric_string[1]:
- set_group_functions_count / float(len(cwp_functions[group_name]))}
- else:
- set_groups_functions_count[group_name] = \
- {metric_string[0]: set_group_functions_count, metric_string[1]: 0.0}
- set_functions_count += set_group_functions_count
-
- set_functions_count_fraction = \
- set_functions_count / float(cwp_functions_count)
- return (set_functions_count, set_functions_count_fraction), \
- set_groups_functions_count
-
-
-def ComputeDistanceForBenchmarkSet(set_function_metrics, cwp_functions,
- metric_string):
- """Computes the distance variation metric pair for the benchmark set.
-
- For the distance variation metric, we compute the sum of the distance
- variations of the functions covered by a set of benchmarks.
- We define the distance variation as the difference between the distance
- value of a function and the ideal distance value (1.0).
- If a function appears in multiple common functions files, we consider
- only the minimum value. We also compute the distance variation per
- function.
-
- In addition, we compute also the same metric pair for every group from
- the keys of the set_function_metrics dict.
-
- Args:
- set_function_metrics: A list of dicts having as a key the name of a group
- and as value a list of functions that match the given group.
- cwp_functions: A dict having as a key the name of the groups and as a value
- the list of CWP functions that match an individual group.
- metric_string: A tuple of strings that will be mapped to the tuple of metric
- values in the returned function group dict. This is done for convenience
- for the JSON output.
-
- Returns:
- A tuple with the metric pair and a dict with the group names and values
- of the metric pair. The first value of the metric pair represents the
- distance variation per function and the second value the distance variation.
- The dict has as a key the name of the group and as a value a dict that
- maps the metric_string to the values of the metric pair of the group.
- """
- set_unique_functions = defaultdict(lambda: defaultdict(lambda: float('inf')))
- set_function_count = 0
- total_distance_variation = 0.0
- for benchmark_function_metrics in set_function_metrics:
- for group_name in benchmark_function_metrics:
- for function_key, metrics in \
- benchmark_function_metrics[group_name].iteritems():
- previous_distance = \
- set_unique_functions[group_name][function_key]
- min_distance = min(metrics[0], previous_distance)
- set_unique_functions[group_name][function_key] = min_distance
- groups_distance_variations = defaultdict(lambda: (0.0, 0.0))
- for group_name, functions_distances in set_unique_functions.iteritems():
- group_function_count = len(functions_distances)
- group_distance_variation = \
- sum(functions_distances.itervalues()) - float(group_function_count)
- total_distance_variation += group_distance_variation
- set_function_count += group_function_count
- groups_distance_variations[group_name] = \
- {metric_string[0]:
- group_distance_variation / float(group_function_count),
- metric_string[1]: group_distance_variation}
-
- return (total_distance_variation / set_function_count,
- total_distance_variation), groups_distance_variations
-
-
-def ComputeScoreForBenchmarkSet(set_function_metrics, cwp_functions,
- metric_string):
- """Computes the function count metric pair for the benchmark set.
-
- For the score metric, we compute the sum of the scores of the functions
- from a set of benchmarks. If a function appears in multiple common
- functions files, we consider only the maximum value. We compute also the
- fraction of this sum from the sum of all the scores of the functions from
- the CWP data covering the given groups, in the ideal case (the ideal
- score of a function is 1.0).
-
- In addition, we compute the same metric pair for every group from the
- keys of the set_function_metrics dict.
-
- Args:
- set_function_metrics: A list of dicts having as a key the name of a group
- and as value a list of functions that match the given group.
- cwp_functions: A dict having as a key the name of the groups and as a value
- the list of CWP functions that match an individual group.
- metric_string: A tuple of strings that will be mapped to the tuple of metric
- values in the returned function group dict. This is done for convenience
- for the JSON output.
-
- Returns:
- A tuple with the metric pair and a dict with the group names and values
- of the metric pair. The first value of the pair is the fraction of the sum
- of the scores from the ideal case and the second value represents the
- sum of scores of the functions. The dict has as a key the name of the group
- and as a value a dict that maps the metric_string to the values of the
- metric pair of the group.
- """
- cwp_functions_count = sum(len(functions)
- for functions in cwp_functions.itervalues())
- set_unique_functions = defaultdict(lambda: defaultdict(lambda: 0.0))
- total_score = 0.0
-
- for benchmark_function_metrics in set_function_metrics:
- for group_name in benchmark_function_metrics:
- for function_key, metrics in \
- benchmark_function_metrics[group_name].iteritems():
- previous_score = \
- set_unique_functions[group_name][function_key]
- max_score = max(metrics[1], previous_score)
- set_unique_functions[group_name][function_key] = max_score
-
- groups_scores = defaultdict(lambda: (0.0, 0.0))
-
- for group_name, function_scores in set_unique_functions.iteritems():
- group_function_count = float(len(cwp_functions[group_name]))
- group_score = sum(function_scores.itervalues())
- total_score += group_score
- groups_scores[group_name] = {
- metric_string[0]: group_score / group_function_count,
- metric_string[1]: group_score
- }
-
- return (total_score / cwp_functions_count, total_score), groups_scores
diff --git a/user_activity_benchmarks/benchmark_metrics_experiment.py b/user_activity_benchmarks/benchmark_metrics_experiment.py
deleted file mode 100755
index e8152e74..00000000
--- a/user_activity_benchmarks/benchmark_metrics_experiment.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/python2
-#
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Runs an experiment with the benchmark metrics on a pair of CWP data sets.
-
-A data set should contain the files with the pairwise inclusive and the
-inclusive statistics. The pairwise inclusive file contains pairs of
-parent and child functions with their inclusive count fractions out of the
-total amount of inclusive count values and the files of the child functions.
-The inclusive file contains the functions with their inclusive count fraction
-out of the total amount of inclusive count values and the file name of the
-function. The input data should be collected using the scripts
-collect_experiment_data.sh or collect_experiment_data_odd_even_session.sh
-
-For every function, this script computes the distance and the score values.
-The output is stored in the file cwp_functions_statistics_file.
-
-For every Chrome OS component, this script computes a set of metrics consisting
-in the number of functions, the average and cumulative distance and score of
-the functions matching the group. The output is stored in the file
-cwp_function_groups_statistics_file.
-"""
-
-import argparse
-import sys
-
-import benchmark_metrics
-import utils
-
-
-class MetricsExperiment(object):
- """Runs an experiment with the benchmark metrics on a pair of data sets."""
-
- def __init__(self, cwp_pairwise_inclusive_reference,
- cwp_pairwise_inclusive_test, cwp_inclusive_reference,
- cwp_inclusive_test, cwp_function_groups_file,
- cwp_function_groups_statistics_file,
- cwp_function_statistics_file):
- """Initializes the MetricsExperiment class.
-
- Args:
- cwp_pairwise_inclusive_reference: The CSV file containing the pairwise
- inclusive values from the reference data set.
- cwp_pairwise_inclusive_test: The CSV file containing the pairwise
- inclusive values from the test data set.
- cwp_inclusive_reference: The CSV file containing the inclusive values
- from the reference data set.
- cwp_inclusive_test: The CSV file containing the inclusive values from
- the test data set.
- cwp_function_groups_file: The CSV file containing the groups of functions.
- cwp_function_groups_statistics_file: The output CSV file that will
- contain the metrics for the function groups.
- cwp_function_statistics_file: The output CSV file that will contain the
- metrics for the CWP functions.
- """
- self._cwp_pairwise_inclusive_reference = cwp_pairwise_inclusive_reference
- self._cwp_pairwise_inclusive_test = cwp_pairwise_inclusive_test
- self._cwp_inclusive_reference = cwp_inclusive_reference
- self._cwp_inclusive_test = cwp_inclusive_test
- self._cwp_function_groups_file = cwp_function_groups_file
- self._cwp_function_groups_statistics_file = \
- cwp_function_groups_statistics_file
- self._cwp_function_statistics_file = cwp_function_statistics_file
-
- def PerformComputation(self):
- """Does the benchmark metrics experimental computation.
-
- For every function, a distance is computed based on the sum of the
- differences of the fractions spent in the child functions. Afterwards,
- a score is computed based on the inclusive count fractions and the
- distance value. The statistics for all the functions are written to the
- file self._cwp_function_statistics_file.
-
- The functions are grouped into Chrome OS components based on the path of
- the file where a function is defined. For every group, the total number
- of functions matching that group, the cumulative distance, the average
- distance and the cumulative score of the functions are computed.
- """
-
- inclusive_statistics_reference = \
- utils.ParseCWPInclusiveCountFile(self._cwp_inclusive_reference)
- inclusive_statistics_cum_reference = \
- utils.ComputeCWPCummulativeInclusiveStatistics(
- inclusive_statistics_reference)
- inclusive_statistics_test = \
- utils.ParseCWPInclusiveCountFile(self._cwp_inclusive_test)
- inclusive_statistics_cum_test = \
- utils.ComputeCWPCummulativeInclusiveStatistics(
- inclusive_statistics_test)
- pairwise_inclusive_statistics_reference = \
- utils.ParseCWPPairwiseInclusiveCountFile(
- self._cwp_pairwise_inclusive_reference)
- pairwise_inclusive_fractions_reference = \
- utils.ComputeCWPChildFunctionsFractions(
- inclusive_statistics_cum_reference,
- pairwise_inclusive_statistics_reference)
- pairwise_inclusive_statistics_test = \
- utils.ParseCWPPairwiseInclusiveCountFile(
- self._cwp_pairwise_inclusive_test)
- pairwise_inclusive_fractions_test = \
- utils.ComputeCWPChildFunctionsFractions(
- inclusive_statistics_cum_test,
- pairwise_inclusive_statistics_test)
- parent_function_statistics = {}
-
- with open(self._cwp_function_groups_file) as input_file:
- cwp_function_groups = utils.ParseFunctionGroups(input_file.readlines())
-
- for parent_function_key, parent_function_statistics_test \
- in inclusive_statistics_test.iteritems():
- parent_function_name, _ = parent_function_key.split(',')
- parent_function_fraction_test = parent_function_statistics_test[2]
-
- parent_function_fraction_reference = \
- inclusive_statistics_reference[parent_function_key][2]
-
- child_functions_fractions_test = \
- pairwise_inclusive_fractions_test.get(parent_function_name, {})
-
- child_functions_fractions_reference = \
- pairwise_inclusive_fractions_reference.get(parent_function_name, {})
-
- distance = benchmark_metrics.ComputeDistanceForFunction(
- child_functions_fractions_test, child_functions_fractions_reference)
-
- parent_function_score_test = benchmark_metrics.ComputeScoreForFunction(
- distance, parent_function_fraction_test,
- parent_function_fraction_reference)
-
- parent_function_statistics[parent_function_key] = \
- (distance, parent_function_score_test)
-
- with open(self._cwp_function_statistics_file, 'w') as output_file:
- statistics_lines = ['function,file,distance,score']
- statistics_lines += \
- [','.join([parent_function_key.replace(';;', ','),
- str(statistic[0]),
- str(statistic[1])])
- for parent_function_key, statistic
- in parent_function_statistics.iteritems()]
- output_file.write('\n'.join(statistics_lines))
-
- cwp_groups_statistics_test = benchmark_metrics.ComputeMetricsForComponents(
- cwp_function_groups, parent_function_statistics)
-
- with open(self._cwp_function_groups_statistics_file, 'w') as output_file:
- group_statistics_lines = \
- ['group,file_path,function_count,distance_cum,distance_avg,score_cum,'
- 'score_avg']
- group_statistics_lines += \
- [','.join([group_name,
- str(statistic[0]),
- str(statistic[1]),
- str(statistic[2]),
- str(statistic[3]),
- str(statistic[4]),
- str(statistic[5])])
- for group_name, statistic
- in cwp_groups_statistics_test.iteritems()]
- output_file.write('\n'.join(group_statistics_lines))
-
-
-def ParseArguments(arguments):
- parser = argparse.ArgumentParser(
- description='Runs an experiment with the benchmark metrics on a pair of '
- 'CWP data sets.')
- parser.add_argument(
- '--cwp_pairwise_inclusive_reference',
- required=True,
- help='The reference CSV file that will contain a pair of parent and '
- 'child functions with their inclusive count fractions out of the total '
- 'amount of inclusive count values.')
- parser.add_argument(
- '--cwp_pairwise_inclusive_test',
- required=True,
- help='The test CSV file that will contain a pair of parent and '
- 'child functions with their inclusive count fractions out of the total '
- 'amount of inclusive count values.')
- parser.add_argument(
- '--cwp_inclusive_reference',
- required=True,
- help='The reference CSV file that will contain a function with its '
- 'inclusive count fraction out of the total amount of inclusive count '
- 'values.')
- parser.add_argument(
- '--cwp_inclusive_test',
- required=True,
- help='The test CSV file that will contain a function with its '
- 'inclusive count fraction out of the total amount of inclusive count '
- 'values.')
- parser.add_argument(
- '-g',
- '--cwp_function_groups_file',
- required=True,
- help='The file that will contain the CWP function groups. '
- 'A line consists of the group name and a file path. A group must '
- 'represent a Chrome OS component.')
- parser.add_argument(
- '-s',
- '--cwp_function_groups_statistics_file',
- required=True,
- help='The output file that will contain the metric statistics for the '
- 'CWP function groups in CSV format. A line consists of the group name, '
- 'file path, number of functions matching the group, the total score '
- 'and distance values.')
- parser.add_argument(
- '-f',
- '--cwp_function_statistics_file',
- required=True,
- help='The output file that will contain the metric statistics for the '
- 'CWP functions in CSV format. A line consists of the function name, file '
- 'name, cumulative distance, average distance, cumulative score and '
- 'average score values.')
-
- options = parser.parse_args(arguments)
- return options
-
-
-def Main(argv):
- options = ParseArguments(argv)
- metrics_experiment = MetricsExperiment(
- options.cwp_pairwise_inclusive_reference,
- options.cwp_pairwise_inclusive_test, options.cwp_inclusive_reference,
- options.cwp_inclusive_test, options.cwp_function_groups_file,
- options.cwp_function_groups_statistics_file,
- options.cwp_function_statistics_file)
- metrics_experiment.PerformComputation()
-
-
-if __name__ == '__main__':
- Main(sys.argv[1:])
diff --git a/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py b/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py
deleted file mode 100755
index c4755efe..00000000
--- a/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Unit tests for the benchmark_metrics_experiment module."""
-
-import os
-import tempfile
-import unittest
-
-from benchmark_metrics_experiment import MetricsExperiment
-
-
-class MetricsExperimentTest(unittest.TestCase):
- """Test class for MetricsExperiment class."""
-
- def __init__(self, *args, **kwargs):
- super(MetricsExperimentTest, self).__init__(*args, **kwargs)
- self._pairwise_inclusive_count_test_file = \
- 'testdata/input/pairwise_inclusive_count_test.csv'
- self._pairwise_inclusive_count_reference_file = \
- 'testdata/input/pairwise_inclusive_count_reference.csv'
- self._inclusive_count_test_file = \
- 'testdata/input/inclusive_count_test.csv'
- self._inclusive_count_reference_file = \
- 'testdata/input/inclusive_count_reference.csv'
- self._cwp_function_groups_file = \
- 'testdata/input/cwp_function_groups.txt'
-
- def _CheckFileContents(self, file_name, expected_content_lines):
- with open(file_name) as input_file:
- result_content_lines = input_file.readlines()
- self.assertListEqual(expected_content_lines, result_content_lines)
-
- def testExperiment(self):
- group_statistics_file, group_statistics_filename = tempfile.mkstemp()
-
- os.close(group_statistics_file)
-
- function_statistics_file, function_statistics_filename = tempfile.mkstemp()
-
- os.close(function_statistics_file)
-
-
- expected_group_statistics_lines = \
- ['group,file_path,function_count,distance_cum,distance_avg,score_cum,'
- 'score_avg\n',
- 'ab,/a/b,2.0,3.01,1.505,8.26344228895,4.13172114448\n',
- 'e,/e,2.0,2.0,1.0,27.5,13.75\n',
- 'cd,/c/d,2.0,2.0,1.0,27.5,13.75']
- expected_function_statistics_lines = \
- ['function,file,distance,score\n',
- 'func_i,/c/d/file_i,1.0,17.6\n',
- 'func_j,/e/file_j,1.0,27.5\n',
- 'func_f,/a/b/file_f,1.59,1.4465408805\n',
- 'func_h,/c/d/file_h,1.0,9.9\n',
- 'func_k,/e/file_k,1.0,0.0\n',
- 'func_g,/a/b/file_g,1.42,6.81690140845']
- metric_experiment = \
- MetricsExperiment(self._pairwise_inclusive_count_reference_file,
- self._pairwise_inclusive_count_test_file,
- self._inclusive_count_reference_file,
- self._inclusive_count_test_file,
- self._cwp_function_groups_file,
- group_statistics_filename,
- function_statistics_filename)
-
- metric_experiment.PerformComputation()
- self._CheckFileContents(group_statistics_filename,
- expected_group_statistics_lines)
- self._CheckFileContents(function_statistics_filename,
- expected_function_statistics_lines)
- os.remove(group_statistics_filename)
- os.remove(function_statistics_filename)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/user_activity_benchmarks/benchmark_metrics_unittest.py b/user_activity_benchmarks/benchmark_metrics_unittest.py
deleted file mode 100755
index a48361fe..00000000
--- a/user_activity_benchmarks/benchmark_metrics_unittest.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Unit tests for the benchmark_metrics module."""
-
-import mock
-import unittest
-import benchmark_metrics
-
-
-class MetricsComputationTest(unittest.TestCase):
- """Test class for MetricsComputation class."""
-
- def __init__(self, *args, **kwargs):
- super(MetricsComputationTest, self).__init__(*args, **kwargs)
-
- def testComputeDistanceForFunction(self):
- child_functions_statistics_sample = {
- 'f,file_f': 0.1,
- 'g,file_g': 0.2,
- 'h,file_h': 0.3,
- 'i,file_i': 0.4
- }
- child_functions_statistics_reference = {
- 'f,file_f': 0.4,
- 'i,file_i': 0.4,
- 'h,file_h2': 0.2
- }
- distance = benchmark_metrics.ComputeDistanceForFunction(
- child_functions_statistics_sample, child_functions_statistics_reference)
- self.assertEqual(distance, 2.0)
-
- distance = benchmark_metrics.ComputeDistanceForFunction({}, {})
- self.assertEqual(distance, 1.0)
-
- distance = benchmark_metrics.ComputeDistanceForFunction(
- child_functions_statistics_sample, {})
- self.assertEqual(distance, 2.0)
-
- distance = benchmark_metrics.ComputeDistanceForFunction(
- {}, child_functions_statistics_reference)
- self.assertEqual(distance, 2.0)
-
- def testComputeScoreForFunction(self):
- score = benchmark_metrics.ComputeScoreForFunction(1.2, 0.3, 0.4)
- self.assertEqual(score, 0.1)
-
- def testComputeMetricsForComponents(self):
- function_metrics = {
- 'func_f,/a/b/file_f': (1.0, 2.3),
- 'func_g,/a/b/file_g': (1.1, 1.5),
- 'func_h,/c/d/file_h': (2.0, 1.7),
- 'func_i,/c/d/file_i': (1.9, 1.8),
- 'func_j,/c/d/file_j': (1.8, 1.9),
- 'func_k,/e/file_k': (1.2, 2.1),
- 'func_l,/e/file_l': (1.3, 3.1)
- }
- cwp_function_groups = [('ab', '/a/b'), ('cd', '/c/d'), ('e', '/e')]
- expected_metrics = {'ab': ('/a/b', 2.0, 2.1, 1.05, 3.8, 1.9),
- 'e': ('/e', 2.0, 2.5, 1.25, 5.2, 2.6),
- 'cd': ('/c/d', 3.0, 5.7, 1.9000000000000001, 5.4, 1.8)}
- result_metrics = benchmark_metrics.ComputeMetricsForComponents(
- cwp_function_groups, function_metrics)
-
- self.assertDictEqual(expected_metrics, result_metrics)
-
- def testComputeMetricsForBenchmark(self):
- function_metrics = {'func_f': (1.0, 2.0),
- 'func_g': (1.1, 2.1),
- 'func_h': (1.2, 2.2),
- 'func_i': (1.3, 2.3)}
- expected_benchmark_metrics = \
- (4, 4.6000000000000005, 1.1500000000000001, 8.6, 2.15)
- result_benchmark_metrics = \
- benchmark_metrics.ComputeMetricsForBenchmark(function_metrics)
-
- self.assertEqual(expected_benchmark_metrics, result_benchmark_metrics)
-
- def testComputeMetricsForBenchmarkSet(self):
- """TODO(evelinad): Add unit test for ComputeMetricsForBenchmarkSet."""
- pass
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/user_activity_benchmarks/collect_experiment_data.sh b/user_activity_benchmarks/collect_experiment_data.sh
deleted file mode 100755
index a76cec82..00000000
--- a/user_activity_benchmarks/collect_experiment_data.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Uses Dremel queries to collect the inclusive and pairwise inclusive
-# statistics.
-
-set -e
-
-if [ "$#" -ne 7 ]; then
- echo "USAGE: collect_experiment_data.sh cwp_table board board_arch " \
- "Chrome_version Chrome_OS_version inclusive_output_file " \
- "pairwise_inclusive_output_file"
- exit 1
-fi
-
-readonly TABLE=$1
-readonly INCLUSIVE_OUTPUT_FILE=$6
-readonly PAIRWISE_INCLUSIVE_OUTPUT_FILE=$7
-readonly PERIODIC_COLLECTION=1
-readonly WHERE_CLAUSE_SPECIFICATIONS="meta.cros.board = '$2' AND \
- meta.cros.cpu_architecture = '$3' AND \
- meta.cros.chrome_version LIKE '%$4%' AND \
- meta.cros.version = '$5' AND \
- meta.cros.collection_info.trigger_event = $PERIODIC_COLLECTION AND \
- session.total_count > 2000"
-
-# Collects the function, with its file, the object and inclusive count
-# fraction out of the total amount of inclusive count values.
-echo "
-SELECT
- replace(frame.function_name, \", \", \"; \") AS function,
- frame.filename AS file,
- frame.load_module_path AS dso,
- SUM(frame.inclusive_count) AS inclusive_count,
- SUM(frame.inclusive_count)/ANY_VALUE(total.value) AS inclusive_count_fraction
-FROM
- $TABLE table,
- table.frame frame
-CROSS JOIN (
- SELECT
- SUM(count) AS value
- FROM
- $TABLE
- WHERE
- $WHERE_CLAUSE_SPECIFICATIONS
-) AS total
-WHERE
- $WHERE_CLAUSE_SPECIFICATIONS
-GROUP BY
- function,
- file,
- dso
-HAVING
- inclusive_count_fraction > 0.0
-ORDER BY
- inclusive_count_fraction DESC;
-" | dremel --sql_dialect=GoogleSQL --min_completion_ratio=1.0 --output=csv \
- > "$INCLUSIVE_OUTPUT_FILE"
-
-# Collects the pair of parent and child functions, with the file and object
-# where the child function is declared and the inclusive count fraction of the
-# pair out of the total amount of inclusive count values.
-echo "
-SELECT
- CONCAT(replace(frame.parent_function_name, \", \", \"; \"), \";;\",
- replace(frame.function_name, \", \", \"; \")) AS parent_child_functions,
- frame.filename AS child_function_file,
- frame.load_module_path AS child_function_dso,
- SUM(frame.inclusive_count)/ANY_VALUE(total.value) AS inclusive_count
-FROM
- $TABLE table,
- table.frame frame
-CROSS JOIN (
- SELECT
- SUM(count) AS value
- FROM $TABLE
- WHERE
- $WHERE_CLAUSE_SPECIFICATIONS
-) AS total
-WHERE
- $WHERE_CLAUSE_SPECIFICATIONS
-GROUP BY
- parent_child_functions,
- child_function_file,
- child_function_dso
-HAVING
- inclusive_count > 0.0
-ORDER BY
- inclusive_count DESC;
-" | dremel --sql_dialect=GoogleSQL --min_completion_ratio=1.0 --output=csv > \
- "$PAIRWISE_INCLUSIVE_OUTPUT_FILE"
diff --git a/user_activity_benchmarks/collect_experiment_data_odd_even_session.sh b/user_activity_benchmarks/collect_experiment_data_odd_even_session.sh
deleted file mode 100755
index 900e582b..00000000
--- a/user_activity_benchmarks/collect_experiment_data_odd_even_session.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Uses Dremel queries to collect the inclusive and pairwise inclusive statistics
-# for odd/even profile collection session ids.
-# The data is collected for an odd or even collection session id.
-
-set -e
-
-if [ $# -lt 8 ]; then
- echo "Usage: collect_experiment_data_odd_even_session.sh cwp_table board " \
- "board_arch Chrome_version Chrome_OS_version odd_even " \
- "inclusive_output_file pairwise_inclusive_output_file"
- exit 1
-fi
-
-readonly TABLE=$1
-readonly INCLUSIVE_OUTPUT_FILE=$7
-readonly PAIRWISE_INCLUSIVE_OUTPUT_FILE=$8
-readonly PERIODIC_COLLECTION=1
-WHERE_CLAUSE_SPECIFICATIONS="meta.cros.board = '$2' AND \
- meta.cros.cpu_architecture = '$3' AND \
- meta.cros.chrome_version LIKE '%$4%' AND \
- meta.cros.version = '$5' AND \
- meta.cros.collection_info.trigger_event = $PERIODIC_COLLECTION AND \
- MOD(session.id, 2) = $6 AND \
- session.total_count > 2000"
-
-# Collects the function, with its file, the object and inclusive count
-# fraction out of the total amount of inclusive count values.
-echo "
-SELECT
- replace(frame.function_name, \", \", \"; \") AS function,
- frame.filename AS file,
- frame.load_module_path AS dso,
- SUM(frame.inclusive_count) AS inclusive_count,
- SUM(frame.inclusive_count)/ANY_VALUE(total.value) AS inclusive_count_fraction
-FROM
- $TABLE table,
- table.frame frame
-CROSS JOIN (
- SELECT
- SUM(count) AS value
- FROM $TABLE
- WHERE
- $WHERE_CLAUSE_SPECIFICATIONS
-) AS total
-WHERE
- $WHERE_CLAUSE_SPECIFICATIONS
-GROUP BY
- function,
- file,
- dso
-HAVING
- inclusive_count_fraction > 0.0
-ORDER BY
- inclusive_count_fraction DESC;
-" | dremel --sql_dialect=GoogleSQL --min_completion_ratio=1.0 --output=csv > \
- "$INCLUSIVE_OUTPUT_FILE"
-
-# Collects the pair of parent and child functions, with the file and object
-# where the child function is declared and the inclusive count fraction of the
-# pair out of the total amount of inclusive count values.
-echo "
-SELECT
- CONCAT(replace(frame.parent_function_name, \", \", \"; \"), \";;\",
- replace(frame.function_name, \", \", \"; \")) AS parent_child_functions,
- frame.filename AS child_function_file,
- frame.load_module_path AS child_function_dso,
- SUM(frame.inclusive_count)/ANY_VALUE(total.value) AS inclusive_count
-FROM
- $TABLE table,
- table.frame frame
-CROSS JOIN (
- SELECT
- SUM(count) AS value
- FROM
- $TABLE
- WHERE
- $WHERE_CLAUSE_SPECIFICATIONS
-) AS total
-WHERE
- $WHERE_CLAUSE_SPECIFICATIONS
-GROUP BY
- parent_child_functions,
- child_function_file,
- child_function_dso
-HAVING
- inclusive_count > 0.0
-ORDER BY
- inclusive_count DESC;
-" | dremel --sql_dialect=GoogleSQL --min_completion_ratio=1.0 --output=csv > \
- "$PAIRWISE_INCLUSIVE_OUTPUT_FILE"
diff --git a/user_activity_benchmarks/collect_pprof_data.sh b/user_activity_benchmarks/collect_pprof_data.sh
deleted file mode 100755
index 5b89f185..00000000
--- a/user_activity_benchmarks/collect_pprof_data.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Collects the pprof tree and top outputs.
-# All the local_cwp symbolized profiles are taken from the
-# local_cwp_results_path.
-# The pprof top output is stored in the pprof_top_results_path and the pprof
-# tree output is stored in the pprof_tree_results_path.
-
-set -e
-
-if [ "$#" -ne 3 ]; then
- echo "USAGE: collect_pprof_data.sh local_cwp_results_path " \
- "pprof_top_results_path pprof_tree_results_path"
- exit 1
-fi
-
-readonly LOCAL_CWP_RESULTS_PATH=$1
-readonly PPROF_TOP_RESULTS_PATH=$2
-readonly PPROF_TREE_RESULTS_PATH=$3
-readonly SYMBOLIZED_PROFILES=`ls $LOCAL_CWP_RESULTS_PATH`
-
-for symbolized_profile in "${SYMBOLIZED_PROFILES[@]}"
-do
- pprof --top "$LOCAL_CWP_RESULTS_PATH/${symbolized_profile}" > \
- "$PPROF_TOP_RESULTS_PATH/${symbolized_profile}.pprof"
- if [ $? -ne 0 ]; then
- echo "Failed to extract the pprof top output for the $symbolized_profile."
- continue
- fi
-
- pprof --tree "$LOCAL_CWP_RESULTS_PATH/${symbolized_profile}" > \
- "$PPROF_TREE_RESULTS_PATH/${symbolized_profile}.pprof"
- if [ $? -ne 0 ]; then
- echo "Failed to extract the pprof tree output for the " \
- "$symbolized_profile."
- continue
- fi
-done
diff --git a/user_activity_benchmarks/collect_telemetry_profiles.sh b/user_activity_benchmarks/collect_telemetry_profiles.sh
deleted file mode 100755
index 0583adca..00000000
--- a/user_activity_benchmarks/collect_telemetry_profiles.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Runs the Telemetry benchmarks with AutoTest and collects their perf profiles.
-# Reads the benchmark names from the telemetry_benchmark_file. Each benchmark
-# should be placed on a separate line.
-# The profile results are placed in the results_path.
-
-set -e
-
-if [ "$#" -ne 5 ]; then
- echo "USAGE: collect_telemetry_profiles.sh board chrome_root_path " \
- "machine_ip results_path telemetry_benchmarks_file"
- exit 1
-fi
-
-# CHROME_ROOT should contain the path with the source of Chrome. This is used by
-# AutoTest.
-export CHROME_ROOT=$2
-
-readonly BOARD=$1
-readonly IP=$3
-readonly RESULTS_PATH=$4
-readonly TELEMETRY_BENCHMARKS_FILE=$5
-
-# The following Telemetry benchmarks failed for the R52-8350.68.0 Chrome OS
-# version: page_cycler_v2.top_10_mobile,
-# page_cycler_v2.basic_oopif, smoothness.tough_filters_cases,
-# page_cycler_v2.intl_hi_ru,
-# image_decoding.image_decoding_measurement, system_health.memory_mobile,
-# memory.top_7_stress, smoothness.tough_path_rendering_cases,
-# page_cycler_v2.tough_layout_cases,
-# memory.long_running_idle_gmail_background_tbmv2, smoothness.tough_webgl_cases,
-# smoothness.tough_canvas_cases, smoothness.tough_texture_upload_cases,
-# top_10_mobile_memory_ignition, startup.large_profile.cold.blank_page,
-# page_cycler_v2.intl_ar_fa_he, start_with_ext.cold.blank_page,
-# start_with_ext.warm.blank_page, page_cycler_v2.intl_ko_th_vi,
-# smoothness.scrolling_tough_ad_case, page_cycler_v2_site_isolation.basic_oopif,
-# smoothness.tough_scrolling_cases, startup.large_profile.warm.blank_page,
-# page_cycler_v2.intl_es_fr_pt-BR, page_cycler_v2.intl_ja_zh,
-# memory.long_running_idle_gmail_tbmv2, smoothness.scrolling_tough_ad_cases,
-# page_cycler_v2.typical_25, smoothness.tough_webgl_ad_cases,
-# smoothness.tough_image_decode_cases.
-#
-# However, the only benchmarks we failed to collect profiles from were:
-# smoothness.tough_filters_cases,
-# smoothness.tough_path_rendering_cases, page_cycler_v2.tough_layout_cases,
-# smoothness.tough_webgl_cases, smoothness.tough_canvas_cases,
-# smoothness.tough_texture_upload_cases, smoothness.tough_scrolling_cases,
-# smoothness.tough_webgl_ad_cases, smoothness.tough_image_decode_cases.
-#
-# Use ./run_benchmark --browser=cros-chrome --remote=$IP list to get the list of
-# Telemetry benchmarks.
-readonly LATEST_PERF_PROFILE=/tmp/test_that_latest/results-1-telemetry_Crosperf/telemetry_Crosperf/profiling/perf.data
-
-while read benchmark
-do
- # TODO(evelinad): We should add -F 4000000 to the list of profiler_args
- # arguments because we need to use the same sampling period as the one used
- # to collect the CWP user data (4M number of cycles for cycles.callgraph).
- test_that --debug --board=${BOARD} --args=" profiler=custom_perf \
- profiler_args='record -g -a -e cycles,instructions' \
- run_local=False test=$benchmark " $IP telemetry_Crosperf
- if [ $? -ne 0 ]; then
- echo "Failed to run the $benchmark telemetry benchmark with Autotest."
- continue
- fi
- echo "Warning: Sampling period is too high. It should be set to 4M samples."
-
- cp "$LATEST_PERF_PROFILE" "$RESULTS_PATH/${benchmark}.data"
- if [ $? -ne 0 ]; then
- echo "Failed to move the perf profile file from $LATEST_PERF_PROFILE to " \
- "$PERF_DATA_RESULTS_PATH/${benchmark}.data for the $benchmark " \
- "telemetry benchmark."
- continue
- fi
-
- # The ssh connection should be configured without password. We need to do
- # this step because we might run out of disk space if we run multiple
- # benchmarks.
- ssh root@$IP "rm -rf /usr/local/profilers/*"
- if [ $? -ne 0 ]; then
- echo "Failed to remove the output files from /usr/local/profilers/ for " \
- "the $benchmark telemetry benchmark."
- continue
- fi
-done < $TELEMETRY_BENCHMARKS_FILE
-
diff --git a/user_activity_benchmarks/cwp_hot_functions_groups.txt b/user_activity_benchmarks/cwp_hot_functions_groups.txt
deleted file mode 100644
index 3a1f893b..00000000
--- a/user_activity_benchmarks/cwp_hot_functions_groups.txt
+++ /dev/null
@@ -1,314 +0,0 @@
-third_party_accessibility_audit home/chrome-bot/chrome_root/src/third_party/accessibility-audit
-third_party_accessibility_test_framework home/chrome-bot/chrome_root/src/third_party/accessibility_test_framework
-third_party_adobe home/chrome-bot/chrome_root/src/third_party/adobe
-third_party_afl home/chrome-bot/chrome_root/src/third_party/afl
-third_party_analytics home/chrome-bot/chrome_root/src/third_party/analytics
-third_party_android_async_task home/chrome-bot/chrome_root/src/third_party/android_async_task
-third_party_android_crazy_linker home/chrome-bot/chrome_root/src/third_party/android_crazy_linker
-third_party_android_data_chart home/chrome-bot/chrome_root/src/third_party/android_data_chart
-third_party_android_media home/chrome-bot/chrome_root/src/third_party/android_media
-third_party_android_opengl home/chrome-bot/chrome_root/src/third_party/android_opengl
-third_party_android_platform home/chrome-bot/chrome_root/src/third_party/android_platform
-third_party_android_protobuf home/chrome-bot/chrome_root/src/third_party/android_protobuf
-third_party_android_support_test_runner home/chrome-bot/chrome_root/src/third_party/android_support_test_runner
-third_party_android_swipe_refresh home/chrome-bot/chrome_root/src/third_party/android_swipe_refresh
-third_party_angle home/chrome-bot/chrome_root/src/third_party/angle
-third_party_apache-portable-runtime home/chrome-bot/chrome_root/src/third_party/apache-portable-runtime
-third_party_apache_velocity home/chrome-bot/chrome_root/src/third_party/apache_velocity
-third_party_apache-win32 home/chrome-bot/chrome_root/src/third_party/apache-win32
-third_party_apple_apsl home/chrome-bot/chrome_root/src/third_party/apple_apsl
-third_party_apple_sample_code home/chrome-bot/chrome_root/src/third_party/apple_sample_code
-third_party_appurify-python home/chrome-bot/chrome_root/src/third_party/appurify-python
-third_party_ashmem home/chrome-bot/chrome_root/src/third_party/ashmem
-third_party_bidichecker home/chrome-bot/chrome_root/src/third_party/bidichecker
-third_party_bintrees home/chrome-bot/chrome_root/src/third_party/bintrees
-third_party_binutils home/chrome-bot/chrome_root/src/third_party/binutils
-third_party_blanketjs home/chrome-bot/chrome_root/src/third_party/blanketjs
-third_party_blimp_fonts home/chrome-bot/chrome_root/src/third_party/blimp_fonts
-third_party_boringssl home/chrome-bot/chrome_root/src/third_party/boringssl
-third_party_boringssl home/chrome-bot/chrome_root/src/third_party/boringssl/
-third_party_bouncycastle home/chrome-bot/chrome_root/src/third_party/bouncycastle
-third_party_brotli home/chrome-bot/chrome_root/src/third_party/brotli
-third_party_bspatch home/chrome-bot/chrome_root/src/third_party/bspatch
-third_party_cacheinvalidation home/chrome-bot/chrome_root/src/third_party/cacheinvalidation
-third_party_cardboard-java home/chrome-bot/chrome_root/src/third_party/cardboard-java
-third_party_catapult home/chrome-bot/chrome_root/src/third_party/catapult
-third_party_ced home/chrome-bot/chrome_root/src/third_party/ced
-third_party_chaijs home/chrome-bot/chrome_root/src/third_party/chaijs
-third_party_checkstyle home/chrome-bot/chrome_root/src/third_party/checkstyle
-third_party_chromite home/chrome-bot/chrome_root/src/third_party/chromite
-third_party_class-dump home/chrome-bot/chrome_root/src/third_party/class-dump
-third_party_cld_2 home/chrome-bot/chrome_root/src/third_party/cld_2
-third_party_cld_3 home/chrome-bot/chrome_root/src/third_party/cld_3
-third_party_closure_compiler home/chrome-bot/chrome_root/src/third_party/closure_compiler
-third_party_closure_linter home/chrome-bot/chrome_root/src/third_party/closure_linter
-third_party_codesighs home/chrome-bot/chrome_root/src/third_party/codesighs
-third_party_colorama home/chrome-bot/chrome_root/src/third_party/colorama
-third_party_crashpad home/chrome-bot/chrome_root/src/third_party/crashpad
-third_party_cros_system_api home/chrome-bot/chrome_root/src/third_party/cros_system_api
-third_party_custom_tabs_client home/chrome-bot/chrome_root/src/third_party/custom_tabs_client
-third_party_cython home/chrome-bot/chrome_root/src/third_party/cython
-third_party_d3 home/chrome-bot/chrome_root/src/third_party/d3
-third_party_decklink home/chrome-bot/chrome_root/src/third_party/decklink
-third_party_deqp home/chrome-bot/chrome_root/src/third_party/deqp
-third_party_devscripts home/chrome-bot/chrome_root/src/third_party/devscripts
-third_party_dom_distiller_js home/chrome-bot/chrome_root/src/third_party/dom_distiller_js
-third_party_drmemory home/chrome-bot/chrome_root/src/third_party/drmemory
-third_party_elfutils home/chrome-bot/chrome_root/src/third_party/elfutils
-third_party_errorprone home/chrome-bot/chrome_root/src/third_party/errorprone
-third_party_espresso home/chrome-bot/chrome_root/src/third_party/espresso
-third_party_expat home/chrome-bot/chrome_root/src/third_party/expat
-third_party_ffmpeg home/chrome-bot/chrome_root/src/third_party/ffmpeg
-third_party_fips181 home/chrome-bot/chrome_root/src/third_party/fips181
-third_party_flac home/chrome-bot/chrome_root/src/third_party/flac
-third_party_flatbuffers home/chrome-bot/chrome_root/src/third_party/flatbuffers
-third_party_flot home/chrome-bot/chrome_root/src/third_party/flot
-third_party_fontconfig home/chrome-bot/chrome_root/src/third_party/fontconfig
-third_party_freetype2 home/chrome-bot/chrome_root/src/third_party/freetype2
-third_party_freetype-android home/chrome-bot/chrome_root/src/third_party/freetype-android
-third_party_fuzzymatch home/chrome-bot/chrome_root/src/third_party/fuzzymatch
-third_party_gardiner_mod home/chrome-bot/chrome_root/src/third_party/gardiner_mod
-third_party_gif_player home/chrome-bot/chrome_root/src/third_party/gif_player
-third_party_gles2_conform home/chrome-bot/chrome_root/src/third_party/gles2_conform
-third_party_glslang home/chrome-bot/chrome_root/src/third_party/glslang
-third_party_google_appengine_cloudstorage home/chrome-bot/chrome_root/src/third_party/google_appengine_cloudstorage
-third_party_google_input_tools home/chrome-bot/chrome_root/src/third_party/google_input_tools
-third_party_google_toolbox_for_mac home/chrome-bot/chrome_root/src/third_party/google_toolbox_for_mac
-third_party_grpc home/chrome-bot/chrome_root/src/third_party/grpc
-third_party_guava home/chrome-bot/chrome_root/src/third_party/guava
-third_party_haha home/chrome-bot/chrome_root/src/third_party/haha
-third_party_hamcrest home/chrome-bot/chrome_root/src/third_party/hamcrest
-third_party_harfbuzz-ng home/chrome-bot/chrome_root/src/third_party/harfbuzz-ng
-third_party_hunspell home/chrome-bot/chrome_root/src/third_party/hunspell
-third_party_hunspell_dictionaries home/chrome-bot/chrome_root/src/third_party/hunspell_dictionaries
-third_party_hwcplus home/chrome-bot/chrome_root/src/third_party/hwcplus
-third_party_iaccessible2 home/chrome-bot/chrome_root/src/third_party/iaccessible2
-third_party_iccjpeg home/chrome-bot/chrome_root/src/third_party/iccjpeg
-third_party_icu home/chrome-bot/chrome_root/src/third_party/icu
-third_party_icu4j home/chrome-bot/chrome_root/src/third_party/icu4j
-third_party_ijar home/chrome-bot/chrome_root/src/third_party/ijar
-third_party_instrumented_libraries home/chrome-bot/chrome_root/src/third_party/instrumented_libraries
-third_party_intellij home/chrome-bot/chrome_root/src/third_party/intellij
-third_party_isimpledom home/chrome-bot/chrome_root/src/third_party/isimpledom
-third_party_javax_inject home/chrome-bot/chrome_root/src/third_party/javax_inject
-third_party_jinja2 home/chrome-bot/chrome_root/src/third_party/jinja2
-third_party_jmake home/chrome-bot/chrome_root/src/third_party/jmake
-third_party_jsoncpp home/chrome-bot/chrome_root/src/third_party/jsoncpp
-third_party_jsr-305 home/chrome-bot/chrome_root/src/third_party/jsr-305
-third_party_jstemplate home/chrome-bot/chrome_root/src/third_party/jstemplate
-third_party_junit home/chrome-bot/chrome_root/src/third_party/junit
-third_party_kasko home/chrome-bot/chrome_root/src/third_party/kasko
-third_party_khronos home/chrome-bot/chrome_root/src/third_party/khronos
-third_party_khronos_glcts home/chrome-bot/chrome_root/src/third_party/khronos_glcts
-third_party_lcov home/chrome-bot/chrome_root/src/third_party/lcov
-third_party_leakcanary home/chrome-bot/chrome_root/src/third_party/leakcanary
-third_party_leveldatabase home/chrome-bot/chrome_root/src/third_party/leveldatabase
-third_party_libaddressinput home/chrome-bot/chrome_root/src/third_party/libaddressinput
-third_party_libc++-static home/chrome-bot/chrome_root/src/third_party/libc++-static
-third_party_libFuzzer home/chrome-bot/chrome_root/src/third_party/libFuzzer
-third_party_libjingle home/chrome-bot/chrome_root/src/third_party/libjingle
-third_party_libjpeg home/chrome-bot/chrome_root/src/third_party/libjpeg
-third_party_libjpeg_turbo home/chrome-bot/chrome_root/src/third_party/libjpeg_turbo
-third_party_liblouis home/chrome-bot/chrome_root/src/third_party/liblouis
-third_party_libphonenumber home/chrome-bot/chrome_root/src/third_party/libphonenumber
-third_party_libpng home/chrome-bot/chrome_root/src/third_party/libpng
-third_party_libsecret home/chrome-bot/chrome_root/src/third_party/libsecret
-third_party_libsrtp home/chrome-bot/chrome_root/src/third_party/libsrtp
-third_party_libsync home/chrome-bot/chrome_root/src/third_party/libsync
-third_party_libudev home/chrome-bot/chrome_root/src/third_party/libudev
-third_party_libusb home/chrome-bot/chrome_root/src/third_party/libusb
-third_party_libva home/chrome-bot/chrome_root/src/third_party/libva
-third_party_libvpx home/chrome-bot/chrome_root/src/third_party/libvpx
-third_party_libwebm home/chrome-bot/chrome_root/src/third_party/libwebm
-third_party_libwebp home/chrome-bot/chrome_root/src/third_party/libwebp
-third_party_libxml home/chrome-bot/chrome_root/src/third_party/libxml
-third_party_libXNVCtrl home/chrome-bot/chrome_root/src/third_party/libXNVCtrl
-third_party_libxslt home/chrome-bot/chrome_root/src/third_party/libxslt
-third_party_libyuv home/chrome-bot/chrome_root/src/third_party/libyuv
-third_party_llvm-build home/chrome-bot/chrome_root/src/third_party/llvm-build
-third_party_logilab home/chrome-bot/chrome_root/src/third_party/logilab
-third_party_lss home/chrome-bot/chrome_root/src/third_party/lss
-third_party_lzma_sdk home/chrome-bot/chrome_root/src/third_party/lzma_sdk
-third_party_mach_override home/chrome-bot/chrome_root/src/third_party/mach_override
-third_party_markdown home/chrome-bot/chrome_root/src/third_party/markdown
-third_party_markupsafe home/chrome-bot/chrome_root/src/third_party/markupsafe
-third_party_mesa home/chrome-bot/chrome_root/src/third_party/mesa
-third_party_minigbm home/chrome-bot/chrome_root/src/third_party/minigbm
-third_party_mocha home/chrome-bot/chrome_root/src/third_party/mocha
-third_party_mockito home/chrome-bot/chrome_root/src/third_party/mockito
-third_party_modp_b64 home/chrome-bot/chrome_root/src/third_party/modp_b64
-third_party_molokocacao home/chrome-bot/chrome_root/src/third_party/molokocacao
-third_party_motemplate home/chrome-bot/chrome_root/src/third_party/motemplate
-third_party_mozilla home/chrome-bot/chrome_root/src/third_party/mozilla
-third_party_mt19937ar home/chrome-bot/chrome_root/src/third_party/mt19937ar
-third_party_netty4 home/chrome-bot/chrome_root/src/third_party/netty4
-third_party_netty-tcnative home/chrome-bot/chrome_root/src/third_party/netty-tcnative
-third_party_ocmock home/chrome-bot/chrome_root/src/third_party/ocmock
-third_party_openh264 home/chrome-bot/chrome_root/src/third_party/openh264
-third_party_openmax_dl home/chrome-bot/chrome_root/src/third_party/openmax_dl
-third_party_opus home/chrome-bot/chrome_root/src/third_party/opus
-third_party_ots home/chrome-bot/chrome_root/src/third_party/ots
-third_party_ow2_asm home/chrome-bot/chrome_root/src/third_party/ow2_asm
-third_party_pdfium home/chrome-bot/chrome_root/src/third_party/pdfium
-third_party_pexpect home/chrome-bot/chrome_root/src/third_party/pexpect
-third_party_ply home/chrome-bot/chrome_root/src/third_party/ply
-third_party_polymer home/chrome-bot/chrome_root/src/third_party/polymer
-third_party_PRESUBMIT.py home/chrome-bot/chrome_root/src/third_party/PRESUBMIT.py
-third_party_proguard home/chrome-bot/chrome_root/src/third_party/proguard
-third_party_protobuf home/chrome-bot/chrome_root/src/third_party/protobuf
-third_party_pycoverage home/chrome-bot/chrome_root/src/third_party/pycoverage
-third_party_pyelftools home/chrome-bot/chrome_root/src/third_party/pyelftools
-third_party_pyftpdlib home/chrome-bot/chrome_root/src/third_party/pyftpdlib
-third_party_pylint home/chrome-bot/chrome_root/src/third_party/pylint
-third_party_pymock home/chrome-bot/chrome_root/src/third_party/pymock
-third_party_python_gflags home/chrome-bot/chrome_root/src/third_party/python_gflags
-third_party_Python-Markdown home/chrome-bot/chrome_root/src/third_party/Python-Markdown
-third_party_py_trace_event home/chrome-bot/chrome_root/src/third_party/py_trace_event
-third_party_pywebsocket home/chrome-bot/chrome_root/src/third_party/pywebsocket
-third_party_qcms home/chrome-bot/chrome_root/src/third_party/qcms
-third_party_qunit home/chrome-bot/chrome_root/src/third_party/qunit
-third_party_re2 home/chrome-bot/chrome_root/src/third_party/re2
-third_party_requests home/chrome-bot/chrome_root/src/third_party/requests
-third_party_robolectric home/chrome-bot/chrome_root/src/third_party/robolectric
-third_party_scons-2.0.1 home/chrome-bot/chrome_root/src/third_party/scons-2.0.1
-third_party_sfntly home/chrome-bot/chrome_root/src/third_party/sfntly
-third_party_shaderc home/chrome-bot/chrome_root/src/third_party/shaderc
-third_party_simplejson home/chrome-bot/chrome_root/src/third_party/simplejson
-third_party_sinonjs home/chrome-bot/chrome_root/src/third_party/sinonjs
-third_party_skia home/chrome-bot/chrome_root/src/third_party/skia
-third_party_smhasher home/chrome-bot/chrome_root/src/third_party/smhasher
-third_party_snappy home/chrome-bot/chrome_root/src/third_party/snappy
-third_party_speech-dispatcher home/chrome-bot/chrome_root/src/third_party/speech-dispatcher
-third_party_SPIRV-Tools home/chrome-bot/chrome_root/src/third_party/SPIRV-Tools
-third_party_sqlite home/chrome-bot/chrome_root/src/third_party/sqlite
-third_party_sqlite4java home/chrome-bot/chrome_root/src/third_party/sqlite4java
-third_party_sudden_motion_sensor home/chrome-bot/chrome_root/src/third_party/sudden_motion_sensor
-third_party_swiftshader home/chrome-bot/chrome_root/src/third_party/swiftshader
-third_party_talloc home/chrome-bot/chrome_root/src/third_party/talloc
-third_party_tcmalloc home/chrome-bot/chrome_root/src/third_party/tcmalloc
-third_party_tlslite home/chrome-bot/chrome_root/src/third_party/tlslite
-third_party_typ home/chrome-bot/chrome_root/src/third_party/typ
-third_party_ub-uiautomator home/chrome-bot/chrome_root/src/third_party/ub-uiautomator
-third_party_usb_ids home/chrome-bot/chrome_root/src/third_party/usb_ids
-third_party_usrsctp home/chrome-bot/chrome_root/src/third_party/usrsctp
-third_party_v4l2capture home/chrome-bot/chrome_root/src/third_party/v4l2capture
-third_party_v4l-utils home/chrome-bot/chrome_root/src/third_party/v4l-utils
-third_party_vulkan home/chrome-bot/chrome_root/src/third_party/vulkan
-third_party_wayland home/chrome-bot/chrome_root/src/third_party/wayland
-third_party_wayland-protocols home/chrome-bot/chrome_root/src/third_party/wayland-protocols
-third_party_wds home/chrome-bot/chrome_root/src/third_party/wds
-third_party_web-animations-js home/chrome-bot/chrome_root/src/third_party/web-animations-js
-third_party_webdriver home/chrome-bot/chrome_root/src/third_party/webdriver
-third_party_webgl home/chrome-bot/chrome_root/src/third_party/webgl
-third_party_WebKit home/chrome-bot/chrome_root/src/third_party/WebKit
-third_party_webpagereplay home/chrome-bot/chrome_root/src/third_party/webpagereplay
-third_party_webrtc home/chrome-bot/chrome_root/src/third_party/webrtc
-third_party_webrtc_overrides home/chrome-bot/chrome_root/src/third_party/webrtc_overrides
-third_party_webtreemap home/chrome-bot/chrome_root/src/third_party/webtreemap
-third_party_widevine home/chrome-bot/chrome_root/src/third_party/widevine
-third_party_woff2 home/chrome-bot/chrome_root/src/third_party/woff2
-third_party_wtl home/chrome-bot/chrome_root/src/third_party/wtl
-third_party_x86inc home/chrome-bot/chrome_root/src/third_party/x86inc
-third_party_xdg-utils home/chrome-bot/chrome_root/src/third_party/xdg-utils
-third_party_yasm home/chrome-bot/chrome_root/src/third_party/yasm
-third_party_zlib home/chrome-bot/chrome_root/src/third_party/zlib
-android_webview home/chrome-bot/chrome_root/src/android_webview
-apps home/chrome-bot/chrome_root/src/apps
-ash home/chrome-bot/chrome_root/src/ash
-base home/chrome-bot/chrome_root/src/base
-blimp home/chrome-bot/chrome_root/src/blimp
-blink home/chrome-bot/chrome_root/src/blink
-breakpad home/chrome-bot/chrome_root/src/breakpad
-build home/chrome-bot/chrome_root/src/build
-build_overrides home/chrome-bot/chrome_root/src/build_overrides
-buildtools home/chrome-bot/chrome_root/src/buildtools
-cc home/chrome-bot/chrome_root/src/cc/
-chrome home/chrome-bot/chrome_root/src/chrome/
-chromecast home/chrome-bot/chrome_root/src/chromecast/
-chrome_elf home/chrome-bot/chrome_root/src/chrome_elf
-chromeos home/chrome-bot/chrome_root/src/chromeos
-components home/chrome-bot/chrome_root/src/components
-content home/chrome-bot/chrome_root/src/content
-courgette home/chrome-bot/chrome_root/src/courgette
-crypto home/chrome-bot/chrome_root/src/crypto
-data home/chrome-bot/chrome_root/src/data
-dbus home/chrome-bot/chrome_root/src/dbus
-DEPS home/chrome-bot/chrome_root/src/DEPS
-device home/chrome-bot/chrome_root/src/device
-docs home/chrome-bot/chrome_root/src/docs
-extensions home/chrome-bot/chrome_root/src/extensions
-gin home/chrome-bot/chrome_root/src/gin
-google_apis home/chrome-bot/chrome_root/src/google_apis
-google_update home/chrome-bot/chrome_root/src/google_update
-gpu home/chrome-bot/chrome_root/src/gpu
-headless home/chrome-bot/chrome_root/src/headless
-infra home/chrome-bot/chrome_root/src/infra
-internal_gyp home/chrome-bot/chrome_root/src/internal_gyp
-ios home/chrome-bot/chrome_root/src/ios
-ipc home/chrome-bot/chrome_root/src/ipc
-jingle home/chrome-bot/chrome_root/src/jingle
-mash home/chrome-bot/chrome_root/src/mash
-media home/chrome-bot/chrome_root/src/media
-mojo home/chrome-bot/chrome_root/src/mojo
-native_client home/chrome-bot/chrome_root/src/native_client
-native_client_sdk home/chrome-bot/chrome_root/src/native_client_sdk
-net home/chrome-bot/chrome_root/src/net
-out home/chrome-bot/chrome_root/src/out
-out_BOARD home/chrome-bot/chrome_root/src/out_BOARD
-pdf home/chrome-bot/chrome_root/src/pdf
-ppapi home/chrome-bot/chrome_root/src/ppapi
-printing home/chrome-bot/chrome_root/src/printing
-remoting home/chrome-bot/chrome_root/src/remoting
-rlz home/chrome-bot/chrome_root/src/rlz
-sandbox home/chrome-bot/chrome_root/src/sandbox
-sdch home/chrome-bot/chrome_root/src/sdch
-services home/chrome-bot/chrome_root/src/services
-skia home/chrome-bot/chrome_root/src/skia
-sql home/chrome-bot/chrome_root/src/sql
-storage home/chrome-bot/chrome_root/src/storage
-styleguide home/chrome-bot/chrome_root/src/styleguide
-sync home/chrome-bot/chrome_root/src/sync
-testing home/chrome-bot/chrome_root/src/testing
-tools home/chrome-bot/chrome_root/src/tools
-ui home/chrome-bot/chrome_root/src/ui
-url home/chrome-bot/chrome_root/src/url
-v8 home/chrome-bot/chrome_root/src/v8
-webkit home/chrome-bot/chrome_root/src/webkit
-third_party_kernel /mnt/host/source/src/third_party/kernel
-build_sys-kernel /build/BOARD/var/cache/portage/sys-kernel
-build_var_cache_portage /build/BOARD/var/cache/portage
-build_pepper_flash /build/BOARD/tmp/portage/chromeos-base/pepper-flash
-build_media_sound /build/BOARD/tmp/portage/media-sound/
-build_media_libs /build/BOARD/tmp/portage/media-libs/
-build_net_dns /build/BOARD/tmp/portage/net-dns
-build_sys_apps /build/BOARD/tmp/portage/sys-apps
-build_app_shells /build/BOARD/tmp/portage/app-shells
-build_x11_libs /build/BOARD/tmp/portage/x11-libs
-build_dev_libs /build/BOARD/tmp/portage/dev-libs
-build_dev_db /build/BOARD/tmp/portage/dev-db
-build_sys_libs /build/BOARD/tmp/portage/sys-libs
-build_app_arch /build/BOARD/tmp/portage/app-arch
-build_app_crypt /build/BOARD/tmp/portage/app-crypt
-build_rsyslog /build/BOARD/tmp/portage/app-admin/rsyslog
-build_net_misc /build/BOARD/tmp/portage/net-misc
-build_sys_fs /build/BOARD/tmp/portage/sys-fs
-build_update_engine /build/BOARD/tmp/portage/chromeos-base/update_engine
-build_libchrome /build/BOARD/tmp/portage/chromeos-base/libchrome
-build_gestures /build/BOARD/tmp/portage/chromeos-base/gestures
-build_libbrillo /build/BOARD/tmp/portage/chromeos-base/libbrillo
-build_shill /build/BOARD/tmp/portage/chromeos-base/shill
-build_libevdev /build/BOARD/tmp/portage/chromeos-base/libevdev
-build_chromeos_base /build/BOARD/tmp/portage/chromeos-base
-build_net_wireless /build/BOARD/tmp/portage/net-wireless
-build_sys_power /build/BOARD/tmp/portage/sys-power/
-build_tmp_portage /build/BOARD/tmp/portage
-usr_include /build/BOARD/usr/include
-blink_bindings /var/cache/chromeos-chrome/chrome-src-internal/src/out_BOARD/Release/gen/blink/bindings/
-var_cache /var/cache
-gcc_stl /usr/lib/gcc/x86_64-cros-linux-gnu/
-gcc_stl /mnt/host/source/src/third_party/gcc/
-libc /var/tmp/portage/cross-x86_64-cros-linux-gnu/
-libc sysdeps/
-libc nptl/
-others /
-others .
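Each line of the groups file above pairs a Chrome OS group name with the file path that defines the group. A minimal sketch of how such lines can be parsed into (name, path) tuples, matching what utils.ParseFunctionGroups is documented to return; the helper name below is illustrative, not the project's implementation:

    def parse_function_groups(lines):
      # Split each "group_name path" line into a (name, path) tuple,
      # skipping blank or malformed lines.
      groups = []
      for line in lines:
        parts = line.split()
        if len(parts) == 2:
          groups.append((parts[0], parts[1]))
      return groups

    # Example:
    #   parse_function_groups(['ash home/chrome-bot/chrome_root/src/ash\n'])
    #   returns [('ash', 'home/chrome-bot/chrome_root/src/ash')]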
diff --git a/user_activity_benchmarks/process_hot_functions.py b/user_activity_benchmarks/process_hot_functions.py
deleted file mode 100755
index 2fbf3f93..00000000
--- a/user_activity_benchmarks/process_hot_functions.py
+++ /dev/null
@@ -1,482 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Processes the functions from the pprof(go/pprof) files and CWP(go/cwp) data.
-
-The pprof --top and pprof --tree outputs should be extracted from the benchmark
-profiles. The outputs contain the hot functions and the call chains.
-
-For each pair of pprof --top and --tree output files, the tool will create a
-file that contains the hot functions present also in the extracted CWP data.
-The common functions are organized in groups that represent a Chrome OS
-component. A function belongs to a group that is defined by a given file path
-if it is declared in a file that shares that path.
-
-A set of metrics is computed for each function, benchmark and Chrome OS group
-covered by a benchmark.
-
-Afterwards, this script extracts the functions that are present in the CWP
-data and not in the benchmark profiles. The extra functions are also grouped
-in Chrome OS components.
-"""
-
-from collections import defaultdict
-
-import argparse
-import os
-import shutil
-import sys
-
-import benchmark_metrics
-import utils
-
-
-class HotFunctionsProcessor(object):
- """Does the pprof and CWP output processing.
-
- Extracts the common, extra functions from the pprof files, groups them in
- Chrome OS components. Computes the metrics for the common functions,
- benchmark and Chrome OS groups covered by a benchmark.
- """
-
- def __init__(self, pprof_top_path, pprof_tree_path, cwp_inclusive_count_file,
- cwp_pairwise_inclusive_count_file, cwp_function_groups_file,
- common_functions_path, common_functions_groups_path,
- benchmark_set_metrics_file, extra_cwp_functions_file,
- extra_cwp_functions_groups_file,
- extra_cwp_functions_groups_path):
- """Initializes the HotFunctionsProcessor.
-
- Args:
- pprof_top_path: The directory containing the files with the pprof --top
- output.
- pprof_tree_path: The directory containing the files with the pprof --tree
- output.
- cwp_inclusive_count_file: The CSV file containing the CWP functions with
- the inclusive count values.
- cwp_pairwise_inclusive_count_file: The CSV file containing the CWP pairs
- of parent and child functions with their inclusive count values.
- cwp_function_groups_file: The file that contains the CWP function groups.
- common_functions_path: The directory containing the CSV output files
- with the common functions of the benchmark profiles and CWP data.
- common_functions_groups_path: The directory containing the CSV output
- files with the CWP groups and their metrics that match the common
- functions of the benchmark profiles and CWP.
- benchmark_set_metrics_file: The CSV output file containing the metrics for
- each benchmark.
- extra_cwp_functions_file: The CSV output file containing the functions
- that are in the CWP data, but are not in any of the benchmark profiles.
- extra_cwp_functions_groups_file: The CSV output file containing the groups
- that match the extra CWP functions and their statistics.
- extra_cwp_functions_groups_path: The directory containing the CSV output
- files with the extra CWP functions that match a particular group.
- """
- self._pprof_top_path = pprof_top_path
- self._pprof_tree_path = pprof_tree_path
- self._cwp_inclusive_count_file = cwp_inclusive_count_file
- self._cwp_pairwise_inclusive_count_file = cwp_pairwise_inclusive_count_file
- self._cwp_function_groups_file = cwp_function_groups_file
- self._common_functions_path = common_functions_path
- self._common_functions_groups_path = common_functions_groups_path
- self._benchmark_set_metrics_file = benchmark_set_metrics_file
- self._extra_cwp_functions_file = extra_cwp_functions_file
- self._extra_cwp_functions_groups_file = extra_cwp_functions_groups_file
- self._extra_cwp_functions_groups_path = extra_cwp_functions_groups_path
-
- def ProcessHotFunctions(self):
- """Does the processing of the hot functions."""
- with open(self._cwp_function_groups_file) as input_file:
- cwp_function_groups = utils.ParseFunctionGroups(input_file.readlines())
- cwp_statistics = \
- self.ExtractCommonFunctions(self._pprof_top_path,
- self._pprof_tree_path,
- self._cwp_inclusive_count_file,
- self._cwp_pairwise_inclusive_count_file,
- cwp_function_groups,
- self._common_functions_path,
- self._common_functions_groups_path,
- self._benchmark_set_metrics_file)
- self.ExtractExtraFunctions(cwp_statistics, self._extra_cwp_functions_file)
- self.GroupExtraFunctions(cwp_statistics, cwp_function_groups,
- self._extra_cwp_functions_groups_path,
- self._extra_cwp_functions_groups_file)
-
- def ExtractCommonFunctions(self, pprof_top_path, pprof_tree_path,
- cwp_inclusive_count_file,
- cwp_pairwise_inclusive_count_file,
- cwp_function_groups, common_functions_path,
- common_functions_groups_path,
- benchmark_set_metrics_file):
- """Extracts the common functions of the benchmark profiles and the CWP data.
-
- For each pair of pprof --top and --tree output files, it creates a separate
- file with the same name containing the common functions specifications and
- metrics, that will be placed in the common_functions_path directory.
-
- The resulting file is in CSV format, containing the following fields:
- function name, file name, object, inclusive count, inclusive_count_fraction,
- flat, flat%, sum%, cum, cum%, distance and score.
-
- For each pair of pprof files, an additional file is created with the
- Chrome OS groups that match the common functions.
-
- The file is in CSV format containing the fields: group name, group path,
- the number of functions that match the group, the average and cumulative
- distance, the average and cumulative score.
-    The file has the same name as the pprof file and is placed in the
- common_functions_groups_path directory.
-
- For all the analyzed benchmarks, the method creates a CSV output file
- containing the metrics for each benchmark. The CSV fields include the
- benchmark name, the number of common functions, the average and
- cumulative distance and score.
-
- It builds a dict of the CWP statistics by calling the
- utils.ParseCWPInclusiveCountFile method and if a function is common, it is
- marked as a COMMON_FUNCTION.
-
- Args:
- pprof_top_path: The name of the directory with the files with the
- pprof --top output.
- pprof_tree_path: The name of the directory with the files with the
- pprof --tree output.
-      cwp_inclusive_count_file: The CSV file with the inclusive count values.
-      cwp_pairwise_inclusive_count_file: The CSV file with the pairwise
-        inclusive count values.
- cwp_function_groups: A list of tuples containing the name of the group
- and the corresponding file path.
- common_functions_path: The path containing the output files with the
- common functions and their metrics.
- common_functions_groups_path: The path containing the output files with
- the Chrome OS groups that match the common functions and their metrics.
- benchmark_set_metrics_file: The CSV output file containing the metrics for
- all the analyzed benchmarks.
-
- Returns:
- A dict containing the CWP statistics with the common functions marked as
- COMMON_FUNCTION.
- """
- cwp_inclusive_count_statistics = \
- utils.ParseCWPInclusiveCountFile(cwp_inclusive_count_file)
- cwp_pairwise_inclusive_count_statistics = \
- utils.ParseCWPPairwiseInclusiveCountFile(
- cwp_pairwise_inclusive_count_file)
- cwp_inclusive_count_statistics_cumulative = \
- utils.ComputeCWPCummulativeInclusiveStatistics(
- cwp_inclusive_count_statistics)
- cwp_pairwise_inclusive_count_fractions = \
- utils.ComputeCWPChildFunctionsFractions(
- cwp_inclusive_count_statistics_cumulative,
- cwp_pairwise_inclusive_count_statistics)
- benchmark_set_metrics = {}
- pprof_files = os.listdir(pprof_top_path)
-
- for pprof_file in pprof_files:
- pprof_top_statistics = \
- utils.ParsePprofTopOutput(os.path.join(pprof_top_path, pprof_file))
- pprof_tree_statistics = \
- utils.ParsePprofTreeOutput(os.path.join(pprof_tree_path, pprof_file))
- common_functions_lines = []
- benchmark_function_metrics = {}
-
- for function_key, function_statistic in pprof_top_statistics.iteritems():
- if function_key not in cwp_inclusive_count_statistics:
- continue
-
- cwp_dso_name, cwp_inclusive_count, cwp_inclusive_count_fraction, _ = \
- cwp_inclusive_count_statistics[function_key]
- cwp_inclusive_count_statistics[function_key] = \
- (cwp_dso_name, cwp_inclusive_count, cwp_inclusive_count_fraction,
- utils.COMMON_FUNCTION)
-
- function_name, _ = function_key.split(',')
- distance = benchmark_metrics.ComputeDistanceForFunction(
- pprof_tree_statistics[function_key],
- cwp_pairwise_inclusive_count_fractions.get(function_name, {}))
- benchmark_cum_p = float(function_statistic[4])
- score = benchmark_metrics.ComputeScoreForFunction(
- distance, cwp_inclusive_count_fraction, benchmark_cum_p)
- benchmark_function_metrics[function_key] = (distance, score)
-
- common_functions_lines.append(','.join([function_key, cwp_dso_name, str(
- cwp_inclusive_count), str(cwp_inclusive_count_fraction), ','.join(
- function_statistic), str(distance), str(score)]))
- benchmark_function_groups_statistics = \
- benchmark_metrics.ComputeMetricsForComponents(
- cwp_function_groups, benchmark_function_metrics)
- benchmark_set_metrics[pprof_file] = \
- benchmark_metrics.ComputeMetricsForBenchmark(
- benchmark_function_metrics)
-
- with open(os.path.join(common_functions_path, pprof_file), 'w') \
- as output_file:
- common_functions_lines.sort(
- key=lambda x: float(x.split(',')[11]), reverse=True)
- common_functions_lines.insert(0, 'function,file,dso,inclusive_count,'
- 'inclusive_count_fraction,flat,flat%,'
- 'sum%,cum,cum%,distance,score')
- output_file.write('\n'.join(common_functions_lines))
-
- with open(os.path.join(common_functions_groups_path, pprof_file), 'w') \
- as output_file:
- common_functions_groups_lines = \
- [','.join([group_name, ','.join(
- [str(statistic) for statistic in group_statistic])])
- for group_name, group_statistic in
- benchmark_function_groups_statistics.iteritems()]
- common_functions_groups_lines.sort(
- key=lambda x: float(x.split(',')[5]), reverse=True)
- common_functions_groups_lines.insert(
- 0, 'group_name,file_path,number_of_functions,distance_cum,'
- 'distance_avg,score_cum,score_avg')
- output_file.write('\n'.join(common_functions_groups_lines))
-
- with open(benchmark_set_metrics_file, 'w') as output_file:
- benchmark_set_metrics_lines = []
-
- for benchmark_name, metrics in benchmark_set_metrics.iteritems():
- benchmark_set_metrics_lines.append(','.join([benchmark_name, ','.join(
- [str(metric) for metric in metrics])]))
- benchmark_set_metrics_lines.sort(
- key=lambda x: float(x.split(',')[4]), reverse=True)
- benchmark_set_metrics_lines.insert(
- 0, 'benchmark_name,number_of_functions,distance_cum,distance_avg,'
- 'score_cum,score_avg')
- output_file.write('\n'.join(benchmark_set_metrics_lines))
-
- return cwp_inclusive_count_statistics
-
- def GroupExtraFunctions(self, cwp_statistics, cwp_function_groups,
- extra_cwp_functions_groups_path,
- extra_cwp_functions_groups_file):
- """Groups the extra functions.
-
- Writes the data of the functions that belong to each group in a separate
- file, sorted by their inclusive count value, in descending order. The file
- name is the same as the group name.
-
- The file is in CSV format, containing the fields: function name, file name,
- object name, inclusive count, inclusive count fraction.
-
-    It creates a CSV file containing the name of the group, its
- common path, the total inclusive count and inclusive count fraction values
- of all the functions declared in files that share the common path, sorted
- in descending order by the inclusive count value.
-
- Args:
- cwp_statistics: A dict containing the CWP statistics.
- cwp_function_groups: A list of tuples with the groups names and the path
- describing the groups.
- extra_cwp_functions_groups_path: The name of the directory containing
- the CSV output files with the extra CWP functions that match a
- particular group.
- extra_cwp_functions_groups_file: The CSV output file containing the groups
- that match the extra functions and their statistics.
- """
- cwp_function_groups_statistics = defaultdict(lambda: ([], '', 0, 0.0))
- for function, statistics in cwp_statistics.iteritems():
- if statistics[3] == utils.COMMON_FUNCTION:
- continue
-
- file_name = function.split(',')[1]
- group_inclusive_count = int(statistics[1])
- group_inclusive_count_fraction = float(statistics[2])
-
- for group in cwp_function_groups:
- group_common_path = group[1]
-
- if group_common_path not in file_name:
- continue
-
- group_name = group[0]
- group_statistics = cwp_function_groups_statistics[group_name]
- group_lines = group_statistics[0]
- group_inclusive_count += group_statistics[2]
- group_inclusive_count_fraction += group_statistics[3]
-
- group_lines.append(','.join([function, statistics[0],
- str(statistics[1]), str(statistics[2])]))
- cwp_function_groups_statistics[group_name] = \
- (group_lines, group_common_path, group_inclusive_count,
- group_inclusive_count_fraction)
- break
-
- extra_cwp_functions_groups_lines = []
- for group_name, group_statistics \
- in cwp_function_groups_statistics.iteritems():
- group_output_lines = group_statistics[0]
- group_output_lines.sort(key=lambda x: int(x.split(',')[3]), reverse=True)
- group_output_lines.insert(
- 0, 'function,file,dso,inclusive_count,inclusive_count_fraction')
- with open(os.path.join(extra_cwp_functions_groups_path, group_name),
- 'w') as output_file:
- output_file.write('\n'.join(group_output_lines))
- extra_cwp_functions_groups_lines.append(','.join(
- [group_name, group_statistics[1], str(group_statistics[2]), str(
- group_statistics[3])]))
-
- extra_cwp_functions_groups_lines.sort(
- key=lambda x: int(x.split(',')[2]), reverse=True)
- extra_cwp_functions_groups_lines.insert(
- 0, 'group,shared_path,inclusive_count,inclusive_count_fraction')
- with open(extra_cwp_functions_groups_file, 'w') as output_file:
- output_file.write('\n'.join(extra_cwp_functions_groups_lines))
-
- def ExtractExtraFunctions(self, cwp_statistics, extra_cwp_functions_file):
- """Gets the functions that are in the CWP data, but not in the pprof output.
-
- Writes the functions and their statistics in the extra_cwp_functions_file
- file. The output is sorted based on the inclusive_count value. The file is
- in CSV format, containing the fields: function name, file name, object name,
- inclusive count and inclusive count fraction.
-
- Args:
- cwp_statistics: A dict containing the CWP statistics indexed by the
- function and the file name, comma separated.
-      extra_cwp_functions_file: The file where the CWP functions and
-        statistics that are marked as EXTRA_FUNCTION should be stored.
- """
- output_lines = []
-
- for function, statistics in cwp_statistics.iteritems():
- if statistics[3] == utils.EXTRA_FUNCTION:
- output_lines.append(','.join([function, statistics[0],
- str(statistics[1]), str(statistics[2])]))
-
- with open(extra_cwp_functions_file, 'w') as output_file:
- output_lines.sort(key=lambda x: int(x.split(',')[3]), reverse=True)
- output_lines.insert(0, 'function,file,dso,inclusive_count,'
- 'inclusive_count_fraction')
- output_file.write('\n'.join(output_lines))
-
-
-def ParseArguments(arguments):
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- '--pprof_top_path',
- required=True,
- help='The directory containing the files with the pprof --top output of '
-      'the benchmark profiles (the hot functions). The names of the files '
-      'should match the names of the pprof --tree output files.')
- parser.add_argument(
- '--pprof_tree_path',
- required=True,
- help='The directory containing the files with the pprof --tree output '
-      'of the benchmark profiles (the call chains). The names of the files '
-      'should match the names of the pprof --top output files.')
- parser.add_argument(
- '--cwp_inclusive_count_file',
- required=True,
- help='The CSV file containing the CWP hot functions with their '
- 'inclusive_count values. The CSV fields include the name of the '
- 'function, the file and the object with the definition, the inclusive '
- 'count value and the inclusive count fraction out of the total amount of '
- 'inclusive count values.')
- parser.add_argument(
- '--cwp_pairwise_inclusive_count_file',
- required=True,
- help='The CSV file containing the CWP pairs of parent and child '
- 'functions with their inclusive count values. The CSV fields include the '
- 'name of the parent and child functions concatenated by ;;, the file '
- 'and the object with the definition of the child function, and the '
- 'inclusive count value.')
- parser.add_argument(
- '--cwp_function_groups_file',
- required=True,
-      help='The file that contains the CWP function groups. A line consists of '
-      'the group name and a file path describing the group. A group must '
-      'represent a Chrome OS component.')
- parser.add_argument(
- '--common_functions_path',
- required=True,
- help='The directory containing the CSV output files with the common '
- 'functions of the benchmark profiles and CWP data. A file will contain '
- 'all the hot functions from a pprof top output file that are also '
- 'included in the file containing the cwp inclusive count values. The CSV '
- 'fields are: the function name, the file and the object where the '
- 'function is declared, the CWP inclusive count and inclusive count '
- 'fraction values, the cumulative and average distance, the cumulative '
- 'and average score. The files with the common functions will have the '
-      'same names as the corresponding pprof output files.')
- parser.add_argument(
- '--common_functions_groups_path',
- required=True,
- help='The directory containing the CSV output files with the Chrome OS '
- 'groups and their metrics that match the common functions of the '
- 'benchmark profiles and CWP. The files with the groups will have the '
-      'same names as the corresponding pprof output files. The CSV fields '
- 'include the group name, group path, the number of functions that match '
- 'the group, the average and cumulative distance, the average and '
- 'cumulative score.')
- parser.add_argument(
- '--benchmark_set_metrics_file',
- required=True,
- help='The CSV output file containing the metrics for each benchmark. The '
- 'CSV fields include the benchmark name, the number of common functions, '
- 'the average and cumulative distance and score.')
- parser.add_argument(
- '--extra_cwp_functions_file',
- required=True,
- help='The CSV output file containing the functions that are in the CWP '
- 'data, but are not in any of the benchmark profiles. The CSV fields '
- 'include the name of the function, the file name and the object with the '
- 'definition, and the CWP inclusive count and inclusive count fraction '
- 'values. The entries are sorted in descending order based on the '
- 'inclusive count value.')
- parser.add_argument(
- '--extra_cwp_functions_groups_file',
- required=True,
- help='The CSV output file containing the groups that match the extra CWP '
- 'functions and their statistics. The CSV fields include the group name, '
- 'the file path, the total inclusive count and inclusive count fraction '
- 'values of the functions matching a particular group.')
- parser.add_argument(
- '--extra_cwp_functions_groups_path',
- required=True,
- help='The directory containing the CSV output files with the extra CWP '
- 'functions that match a particular group. The name of the file is the '
- 'same as the group name. The CSV fields include the name of the '
- 'function, the file name and the object with the definition, and the CWP '
- 'inclusive count and inclusive count fraction values. The entries are '
- 'sorted in descending order based on the inclusive count value.')
-
- options = parser.parse_args(arguments)
-
- return options
-
-
-def Main(argv):
- options = ParseArguments(argv)
-
- if os.path.exists(options.common_functions_path):
- shutil.rmtree(options.common_functions_path)
-
- os.makedirs(options.common_functions_path)
-
- if os.path.exists(options.common_functions_groups_path):
- shutil.rmtree(options.common_functions_groups_path)
-
- os.makedirs(options.common_functions_groups_path)
-
- if os.path.exists(options.extra_cwp_functions_groups_path):
- shutil.rmtree(options.extra_cwp_functions_groups_path)
-
- os.makedirs(options.extra_cwp_functions_groups_path)
-
- hot_functions_processor = HotFunctionsProcessor(
- options.pprof_top_path, options.pprof_tree_path,
- options.cwp_inclusive_count_file,
- options.cwp_pairwise_inclusive_count_file,
- options.cwp_function_groups_file, options.common_functions_path,
- options.common_functions_groups_path, options.benchmark_set_metrics_file,
- options.extra_cwp_functions_file, options.extra_cwp_functions_groups_file,
- options.extra_cwp_functions_groups_path)
-
- hot_functions_processor.ProcessHotFunctions()
-
-
-if __name__ == '__main__':
- Main(sys.argv[1:])
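The grouping done throughout process_hot_functions.py reduces to a substring match: a function is attributed to the first group whose path occurs in the file name in which the function is declared. A minimal sketch of that rule, with an illustrative helper name:

    def match_group(file_name, function_groups):
      # function_groups is a list of (group_name, group_path) tuples;
      # return the name of the first group whose path appears in file_name.
      for group_name, group_path in function_groups:
        if group_path in file_name:
          return group_name
      return None

    # Example:
    #   groups = [('base', 'home/chrome-bot/chrome_root/src/base')]
    #   match_group('home/chrome-bot/chrome_root/src/base/run_loop.cc', groups)
    #   returns 'base'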
diff --git a/user_activity_benchmarks/process_hot_functions_unittest.py b/user_activity_benchmarks/process_hot_functions_unittest.py
deleted file mode 100755
index 0ad248b1..00000000
--- a/user_activity_benchmarks/process_hot_functions_unittest.py
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Unit tests for the process_hot_functions module."""
-
-from process_hot_functions import HotFunctionsProcessor, ParseArguments
-
-import mock
-import os
-import shutil
-import tempfile
-import unittest
-
-
-class ParseArgumentsTest(unittest.TestCase):
- """Test class for command line argument parsing."""
-
- def __init__(self, *args, **kwargs):
- super(ParseArgumentsTest, self).__init__(*args, **kwargs)
-
- def testParseArguments(self):
- arguments = \
- ['-p', 'dummy_pprof', '-c', 'dummy_common', '-e', 'dummy_extra', '-w',
- 'dummy_cwp']
- options = ParseArguments(arguments)
-
- self.assertEqual(options.pprof_path, 'dummy_pprof')
- self.assertEqual(options.cwp_hot_functions_file, 'dummy_cwp')
- self.assertEqual(options.common_functions_path, 'dummy_common')
- self.assertEqual(options.extra_cwp_functions_file, 'dummy_extra')
-
- @mock.patch('sys.exit')
- def testDeathParseArguments(self, sys_exit_method):
- self.assertFalse(sys_exit_method.called)
- ParseArguments([])
- self.assertTrue(sys_exit_method.called)
- self.assertNotEqual(sys_exit_method.return_value, 0)
-
-
-class HotFunctionsProcessorTest(unittest.TestCase):
- """Test class for HotFunctionsProcessor class."""
-
- def __init__(self, *args, **kwargs):
- super(HotFunctionsProcessorTest, self).__init__(*args, **kwargs)
- self._pprof_path = 'testdata/input/pprof'
- self._cwp_functions_file = 'testdata/input/cwp_functions_file.csv'
- self._cwp_functions_file_parsing = \
- 'testdata/input/parse_cwp_statistics.csv'
- self._common_functions_path = ''
- self._expected_common_functions_path = 'testdata/expected/pprof_common'
- self._extra_cwp_functions_file = ''
- self._cwp_function_groups_file = 'testdata/input/cwp_function_groups'
- self._cwp_function_groups_statistics_file = 'dummy'
- self._cwp_function_groups_file_prefix = 'dummy'
-
- def _CreateHotFunctionsProcessor(self,
- extra_cwp_functions_file,
- cwp_function_groups_file=None,
- cwp_function_groups_statistics_file=None,
- cwp_function_groups_file_prefix=None):
- return HotFunctionsProcessor(self._pprof_path, self._cwp_functions_file,
- self._common_functions_path,
- extra_cwp_functions_file,
- cwp_function_groups_file,
- cwp_function_groups_statistics_file,
- cwp_function_groups_file_prefix)
-
- def checkFileContents(self, file_name, expected_content_lines):
- with open(file_name, 'r') as input_file:
- result_content_lines = input_file.readlines()
- self.assertListEqual(expected_content_lines, result_content_lines)
-
- @mock.patch.object(HotFunctionsProcessor, 'ExtractCommonFunctions')
- @mock.patch.object(HotFunctionsProcessor, 'ExtractExtraFunctions')
- @mock.patch.object(HotFunctionsProcessor, 'GroupExtraFunctions')
- def testProcessHotFunctionsNoGroupping(self, group_functions_method,
- extra_functions_method,
- common_functions_method):
- hot_functions_processor = self._CreateHotFunctionsProcessor(
- self._extra_cwp_functions_file)
-
- hot_functions_processor.ProcessHotFunctions()
-
- self.assertTrue(common_functions_method.called)
- self.assertTrue(extra_functions_method.called)
- self.assertEqual(common_functions_method.call_count, 1)
- self.assertEqual(extra_functions_method.call_count, 1)
- self.assertFalse(group_functions_method.called)
-
- @mock.patch.object(HotFunctionsProcessor, 'ExtractCommonFunctions')
- @mock.patch.object(HotFunctionsProcessor, 'ExtractExtraFunctions')
- @mock.patch.object(HotFunctionsProcessor, 'GroupExtraFunctions')
- def testProcessHotFunctionsGroupping(self, group_functions_method,
- extra_functions_method,
- common_functions_method):
- hot_functions_processor = self._CreateHotFunctionsProcessor(
- self._extra_cwp_functions_file, self._cwp_function_groups_file,
- self._cwp_function_groups_statistics_file,
- self._cwp_function_groups_file_prefix)
-
- hot_functions_processor.ProcessHotFunctions()
-
- self.assertTrue(common_functions_method.called)
- self.assertTrue(extra_functions_method.called)
- self.assertEqual(common_functions_method.call_count, 1)
- self.assertEqual(extra_functions_method.call_count, 1)
- self.assertTrue(group_functions_method.called)
- self.assertEqual(group_functions_method.call_count, 1)
-
- def testParseCWPStatistics(self):
- cwp_statistics = {'dummy_method1,dummy_file1': ('dummy_object1,1', 0),
- 'dummy_method2,dummy_file2': ('dummy_object2,2', 0),
- 'dummy_method3,dummy_file3': ('dummy_object3,3', 0),
- 'dummy_method4,dummy_file4': ('dummy_object4,4', 0)}
- hot_functions_processor = self._CreateHotFunctionsProcessor(
- self._extra_cwp_functions_file)
- result = hot_functions_processor.ParseCWPStatistics(
- self._cwp_functions_file_parsing)
-
- self.assertDictEqual(result, cwp_statistics)
-
- def testExtractCommonFunctions(self):
- hot_functions_processor = self._CreateHotFunctionsProcessor(
- self._extra_cwp_functions_file)
- common_functions_path = tempfile.mkdtemp()
- hot_functions_processor.ExtractCommonFunctions(self._pprof_path,
- common_functions_path,
- self._cwp_functions_file)
- expected_files = \
- [os.path.join(self._expected_common_functions_path, expected_file)
- for expected_file in os.listdir(self._expected_common_functions_path)]
- result_files = \
- [os.path.join(common_functions_path, result_file)
- for result_file in os.listdir(common_functions_path)]
-
- expected_files.sort()
- result_files.sort()
-
- for expected_file_name, result_file_name in \
- zip(expected_files, result_files):
- with open(expected_file_name) as expected_file:
- expected_output_lines = expected_file.readlines()
- self.checkFileContents(result_file_name, expected_output_lines)
- shutil.rmtree(common_functions_path)
-
- def testExtractExtraFunctions(self):
- cwp_statistics = {'dummy_method1,dummy_file1': ('dummy_object1,1', 0),
- 'dummy_method2,dummy_file2': ('dummy_object2,2', 1),
- 'dummy_method3,dummy_file3': ('dummy_object3,3', 1),
- 'dummy_method4,dummy_file4': ('dummy_object4,4', 0)}
- expected_output_lines = ['function,file,dso,inclusive_count\n',
- 'dummy_method4,dummy_file4,dummy_object4,4\n',
- 'dummy_method1,dummy_file1,dummy_object1,1']
- temp_file, temp_filename = tempfile.mkstemp()
- os.close(temp_file)
- hot_functions_processor = self._CreateHotFunctionsProcessor(temp_filename)
-
- hot_functions_processor.ExtractExtraFunctions(cwp_statistics, temp_filename)
- self.checkFileContents(temp_filename, expected_output_lines)
- os.remove(temp_filename)
-
- def testParseFunctionGroups(self):
- cwp_function_groups_lines = ['group1 /a\n', 'group2 /b\n', 'group3 /c\n',
- 'group4 /d\n']
- expected_output = [('group1', '/a', 0, []), ('group2', '/b', 0, []),
- ('group3', '/c', 0, []), ('group4', '/d', 0, [])]
- result = HotFunctionsProcessor.ParseFunctionGroups(
- cwp_function_groups_lines)
- self.assertListEqual(expected_output, result)
-
- def testGroupExtraFunctions(self):
- cwp_statistics = {'dummy_method1,/a/b': ('dummy_object1,1', 1),
- 'dummy_method2,/c/d': ('dummy_object2,2', 0),
- 'dummy_method3,/a/b': ('dummy_object3,3', 0),
- 'dummy_method4,/c/d': ('dummy_object4,4', 1),
- 'dummy_method5,/a/b': ('dummy_object5,5', 0),
- 'dummy_method6,/e': ('dummy_object6,6', 0),
- 'dummy_method7,/c/d': ('dummy_object7,7', 0),
- 'dummy_method8,/e': ('dummy_object8,8', 0)}
- cwp_groups_statistics_file, \
- cwp_groups_statistics_filename = tempfile.mkstemp()
-
- os.close(cwp_groups_statistics_file)
-
- cwp_groups_file_path = tempfile.mkdtemp()
- cwp_groups_file_prefix = os.path.join(cwp_groups_file_path, 'dummy')
- hot_functions_processor = self._CreateHotFunctionsProcessor(
- self._extra_cwp_functions_file)
-
- hot_functions_processor.GroupExtraFunctions(cwp_statistics,
- cwp_groups_file_prefix,
- self._cwp_function_groups_file,
- cwp_groups_statistics_filename)
-
- expected_group_ab_lines = ['function,file,dso,inclusive_count\n',
- 'dummy_method5,/a/b,dummy_object5,5\n',
- 'dummy_method3,/a/b,dummy_object3,3']
- expected_group_cd_lines = ['function,file,dso,inclusive_count\n',
- 'dummy_method7,/c/d,dummy_object7,7\n',
- 'dummy_method2,/c/d,dummy_object2,2']
- expected_group_e_lines = ['function,file,dso,inclusive_count\n',
- 'dummy_method8,/e,dummy_object8,8\n',
- 'dummy_method6,/e,dummy_object6,6']
- expected_group_statistics_lines = ['group,shared_path,inclusive_count\n',
- 'e,/e,14\n', 'cd,/c/d,9\n', 'ab,/a/b,8']
-
- self.checkFileContents('%sab' % (cwp_groups_file_prefix,),
- expected_group_ab_lines)
- self.checkFileContents('%scd' % (cwp_groups_file_prefix,),
- expected_group_cd_lines)
- self.checkFileContents('%se' % (cwp_groups_file_prefix,),
- expected_group_e_lines)
- self.checkFileContents(cwp_groups_statistics_filename,
- expected_group_statistics_lines)
-
- shutil.rmtree(cwp_groups_file_path)
- os.remove(cwp_groups_statistics_filename)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/user_activity_benchmarks/select_hot_functions.sql b/user_activity_benchmarks/select_hot_functions.sql
deleted file mode 100644
index d121d619..00000000
--- a/user_activity_benchmarks/select_hot_functions.sql
+++ /dev/null
@@ -1,27 +0,0 @@
--- Collects the function, with its file, the object and inclusive count value.
--- The limits here are entirely arbitrary.
--- For more background, look at
--- https://sites.google.com/a/google.com/cwp/about/callgraphs.
-SELECT
- frame.function_name AS function,
- frame.filename AS file,
- frame.load_module_path AS dso,
- sum(frame.inclusive_count) AS inclusive_count
-FROM
- -- Collect the data stored in CWP over the last 30 days.
- FLATTEN(chromeos_wide_profiling.sampledb.cycles.callgraph.last30days, frame)
-WHERE
- meta.cros.report_id % UINT64("1") == 0
- -- The reports were collected periodically.
- AND meta.cros.collection_info.trigger_event == 1
- AND `profile.duration_usec` < 2100000
- -- The reports were from a busy machine.
- AND session.total_count > 2000
- -- The reports are from the gnawty board, x86_64 architecture.
- AND meta.cros.board == "gnawty"
- AND meta.cros.cpu_architecture == "x86_64"
- -- The reports include callchain data.
- AND left(meta.cros.version, 4) > "6970"
- GROUP BY function, dso, file
-ORDER BY `inclusive_count` DESC
-LIMIT 50000 ;
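The query above exports one row per function with the fields function, file, dso and inclusive_count. The inclusive_count_fraction consumed by the processing scripts is not part of the query output; a minimal post-processing sketch, assuming the fraction is simply each function's share of the total inclusive count (the helper name is illustrative):

    import csv

    def add_inclusive_count_fractions(input_csv, output_csv):
      # Read the exported rows, compute each function's share of the total
      # inclusive count and append it as inclusive_count_fraction.
      with open(input_csv) as input_file:
        rows = list(csv.DictReader(input_file))
      total = sum(int(row['inclusive_count']) for row in rows)
      with open(output_csv, 'w') as output_file:
        writer = csv.writer(output_file)
        writer.writerow(['function', 'file', 'dso', 'inclusive_count',
                         'inclusive_count_fraction'])
        for row in rows:
          count = int(row['inclusive_count'])
          writer.writerow([row['function'], row['file'], row['dso'], count,
                           float(count) / total])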
diff --git a/user_activity_benchmarks/select_optimal_benchmark_set.py b/user_activity_benchmarks/select_optimal_benchmark_set.py
deleted file mode 100755
index 1c8305cf..00000000
--- a/user_activity_benchmarks/select_optimal_benchmark_set.py
+++ /dev/null
@@ -1,347 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Selects the optimal set of benchmarks.
-
-For each benchmark, there is a file with the common functions, as extracted by
-the process_hot_functions module.
-
-The script receives as input the CSV file with the CWP inclusive count values,
-the file with Chrome OS groups and the path containing a file with common
-functions for every benchmark.
-
-It extracts for every benchmark and for the CWP data all the functions that
-match the given Chrome OS groups.
-
-It generates all possible benchmark sets of a given size, computes a metric
-for every set and outputs the optimal sets, based on which ones have the best
-metric value.
-
-Three different metrics have been used: function count, distance
-variation and score.
-
-For the function count metric, we count the unique functions covered by a
-set of benchmarks. Besides the number of unique functions, we also compute
-the fraction of unique functions out of the number of CWP functions from the
-given groups. The benchmark set with the highest number of unique functions
-across all the given groups is considered the best.
-
-For the distance variation metric, we compute the sum of the distance variations
-of the functions covered by a set of benchmarks. We define the distance
-variation as the difference between the distance value of a function and the
-ideal distance value (1.0). If a function appears in multiple common functions
-files, we consider only the minimum value. We also compute the distance
-variation per function. The set that has the smallest value for the
-distance variation per function is considered the best.
-
-For the score metric, we compute the sum of the scores of the functions from a
-set of benchmarks. If a function appears in multiple common functions files,
-we consider only the maximum value. We also compute the fraction of this sum
-from the sum of all the scores of the functions from the CWP data covering the
-given groups, in the ideal case (the ideal score of a function is 1.0).
-
-We compute the metrics in the same manner for individual Chrome OS groups.
-"""
-
-from collections import defaultdict
-
-import argparse
-import csv
-import itertools
-import json
-import operator
-import os
-import sys
-
-import benchmark_metrics
-import utils
-
-
-class BenchmarkSet(object):
- """Selects the optimal set of benchmarks of given size."""
-
- # Constants that specify the metric type.
- FUNCTION_COUNT_METRIC = 'function_count'
- DISTANCE_METRIC = 'distance_variation'
- SCORE_METRIC = 'score_fraction'
-
- def __init__(self, benchmark_set_size, benchmark_set_output_file,
- benchmark_set_common_functions_path, cwp_inclusive_count_file,
- cwp_function_groups_file, metric):
- """Initializes the BenchmarkSet.
-
- Args:
- benchmark_set_size: Constant representing the size of a benchmark set.
- benchmark_set_output_file: The output file that will contain the set of
- optimal benchmarks with the metric values.
- benchmark_set_common_functions_path: The directory containing the files
- with the common functions for the list of benchmarks.
- cwp_inclusive_count_file: The CSV file containing the CWP functions with
- their inclusive count values.
- cwp_function_groups_file: The file that contains the CWP function groups.
- metric: The type of metric used for the analysis.
- """
- self._benchmark_set_size = int(benchmark_set_size)
- self._benchmark_set_output_file = benchmark_set_output_file
- self._benchmark_set_common_functions_path = \
- benchmark_set_common_functions_path
- self._cwp_inclusive_count_file = cwp_inclusive_count_file
- self._cwp_function_groups_file = cwp_function_groups_file
- self._metric = metric
-
- @staticmethod
- def OrganizeCWPFunctionsInGroups(cwp_inclusive_count_statistics,
- cwp_function_groups):
- """Selects the CWP functions that match the given Chrome OS groups.
-
- Args:
- cwp_inclusive_count_statistics: A dict with the CWP functions.
- cwp_function_groups: A list with the CWP function groups.
-
- Returns:
-      A dict having as a key the name of a group and as a value the list of
-      CWP functions that match that group.
- """
- cwp_functions_grouped = defaultdict(list)
- for function_key in cwp_inclusive_count_statistics:
- _, file_name = function_key.split(',')
- for group_name, file_path in cwp_function_groups:
- if file_path not in file_name:
- continue
- cwp_functions_grouped[group_name].append(function_key)
- break
- return cwp_functions_grouped
-
- @staticmethod
- def OrganizeBenchmarkSetFunctionsInGroups(benchmark_set_files,
- benchmark_set_common_functions_path,
- cwp_function_groups):
- """Selects the benchmark functions that match the given Chrome OS groups.
-
- Args:
- benchmark_set_files: The list of common functions files corresponding to a
- benchmark.
- benchmark_set_common_functions_path: The directory containing the files
- with the common functions for the list of benchmarks.
- cwp_function_groups: A list with the CWP function groups.
-
- Returns:
- A dict having as a key the name of a common functions file. The value is
- a dict having as a key the name of a group and as value a list of
- functions that match the given group.
- """
-
- benchmark_set_functions_grouped = {}
- for benchmark_file_name in benchmark_set_files:
- benchmark_full_file_path = \
- os.path.join(benchmark_set_common_functions_path,
- benchmark_file_name)
- with open(benchmark_full_file_path) as input_file:
- statistics_reader = \
- csv.DictReader(input_file, delimiter=',')
- benchmark_functions_grouped = defaultdict(dict)
- for statistic in statistics_reader:
- function_name = statistic['function']
- file_name = statistic['file']
- for group_name, file_path in cwp_function_groups:
- if file_path not in file_name:
- continue
- function_key = ','.join([function_name, file_name])
- distance = float(statistic['distance'])
- score = float(statistic['score'])
- benchmark_functions_grouped[group_name][function_key] = \
- (distance, score)
- break
- benchmark_set_functions_grouped[benchmark_file_name] = \
- benchmark_functions_grouped
- return benchmark_set_functions_grouped
-
- @staticmethod
- def SelectOptimalBenchmarkSetBasedOnMetric(all_benchmark_combinations_sets,
- benchmark_set_functions_grouped,
- cwp_functions_grouped,
- metric_function_for_set,
- metric_comparison_operator,
- metric_default_value,
- metric_string):
- """Generic method that selects the optimal benchmark set based on a metric.
-
-    The reason for implementing a generic function is to avoid logic duplication
- for selecting a benchmark set based on the three different metrics.
-
- Args:
- all_benchmark_combinations_sets: The list with all the sets of benchmark
- combinations.
- benchmark_set_functions_grouped: A dict with benchmark functions as
- returned by OrganizeBenchmarkSetFunctionsInGroups.
- cwp_functions_grouped: A dict with the CWP functions as returned by
- OrganizeCWPFunctionsInGroups.
- metric_function_for_set: The method used to compute the metric for a given
- benchmark set.
- metric_comparison_operator: A comparison operator used to compare two
- values of the same metric (i.e: operator.lt or operator.gt).
- metric_default_value: The default value for the metric.
- metric_string: A tuple of strings used in the JSON output for the pair of
- the values of the metric.
-
- Returns:
-      A list of tuples, one for each optimal benchmark set. A tuple
- contains the list of benchmarks from the set, the pair of metric values
- and a dictionary with the metrics for each group.
- """
- optimal_sets = [([], metric_default_value, {})]
-
- for benchmark_combination_set in all_benchmark_combinations_sets:
- function_metrics = [benchmark_set_functions_grouped[benchmark]
- for benchmark in benchmark_combination_set]
- set_metrics, set_groups_metrics = \
- metric_function_for_set(function_metrics, cwp_functions_grouped,
- metric_string)
- optimal_value = optimal_sets[0][1][0]
- if metric_comparison_operator(set_metrics[0], optimal_value):
- optimal_sets = \
- [(benchmark_combination_set, set_metrics, set_groups_metrics)]
- elif set_metrics[0] == optimal_sets[0][1][0]:
- optimal_sets.append(
- (benchmark_combination_set, set_metrics, set_groups_metrics))
-
- return optimal_sets
-
- def SelectOptimalBenchmarkSet(self):
- """Selects the optimal benchmark sets and writes them in JSON format.
-
- Parses the CWP inclusive count statistics and benchmark common functions
- files. Organizes the functions into groups. For every optimal benchmark
- set, the method writes in the self._benchmark_set_output_file the list of
- benchmarks, the pair of metrics and a dictionary with the pair of
- metrics for each group covered by the benchmark set.
- """
-
- benchmark_set_files = os.listdir(self._benchmark_set_common_functions_path)
- all_benchmark_combinations_sets = \
- itertools.combinations(benchmark_set_files, self._benchmark_set_size)
-
- with open(self._cwp_function_groups_file) as input_file:
- cwp_function_groups = utils.ParseFunctionGroups(input_file.readlines())
-
- cwp_inclusive_count_statistics = \
- utils.ParseCWPInclusiveCountFile(self._cwp_inclusive_count_file)
- cwp_functions_grouped = self.OrganizeCWPFunctionsInGroups(
- cwp_inclusive_count_statistics, cwp_function_groups)
- benchmark_set_functions_grouped = \
- self.OrganizeBenchmarkSetFunctionsInGroups(
- benchmark_set_files, self._benchmark_set_common_functions_path,
- cwp_function_groups)
-
- if self._metric == self.FUNCTION_COUNT_METRIC:
- metric_function_for_benchmark_set = \
- benchmark_metrics.ComputeFunctionCountForBenchmarkSet
- metric_comparison_operator = operator.gt
- metric_default_value = (0, 0.0)
- metric_string = ('function_count', 'function_count_fraction')
- elif self._metric == self.DISTANCE_METRIC:
- metric_function_for_benchmark_set = \
- benchmark_metrics.ComputeDistanceForBenchmarkSet
- metric_comparison_operator = operator.lt
- metric_default_value = (float('inf'), float('inf'))
- metric_string = \
- ('distance_variation_per_function', 'total_distance_variation')
- elif self._metric == self.SCORE_METRIC:
- metric_function_for_benchmark_set = \
- benchmark_metrics.ComputeScoreForBenchmarkSet
- metric_comparison_operator = operator.gt
- metric_default_value = (0.0, 0.0)
- metric_string = ('score_fraction', 'total_score')
- else:
- raise ValueError("Invalid metric")
-
- optimal_benchmark_sets = \
- self.SelectOptimalBenchmarkSetBasedOnMetric(
- all_benchmark_combinations_sets, benchmark_set_functions_grouped,
- cwp_functions_grouped, metric_function_for_benchmark_set,
- metric_comparison_operator, metric_default_value, metric_string)
-
- json_output = []
-
- for benchmark_set in optimal_benchmark_sets:
- json_entry = {
- 'benchmark_set':
- list(benchmark_set[0]),
- 'metrics': {
- metric_string[0]: benchmark_set[1][0],
- metric_string[1]: benchmark_set[1][1]
- },
- 'groups':
- dict(benchmark_set[2])
- }
- json_output.append(json_entry)
-
- with open(self._benchmark_set_output_file, 'w') as output_file:
- json.dump(json_output, output_file)
-
-
-def ParseArguments(arguments):
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- '--benchmark_set_common_functions_path',
- required=True,
- help='The directory containing the CSV files with the common functions '
- 'of the benchmark profiles and CWP data. A file will contain all the hot '
- 'functions from a pprof top output file that are also included in the '
- 'file containing the cwp inclusive count values. The CSV fields are: the '
- 'function name, the file and the object where the function is declared, '
- 'the CWP inclusive count and inclusive count fraction values, the '
- 'cumulative and average distance, the cumulative and average score. The '
-    'files with the common functions will have the same names as the '
- 'corresponding pprof output files.')
- parser.add_argument(
- '--cwp_inclusive_count_file',
- required=True,
- help='The CSV file containing the CWP hot functions with their '
- 'inclusive_count values. The CSV fields include the name of the '
- 'function, the file and the object with the definition, the inclusive '
- 'count value and the inclusive count fraction out of the total amount of '
- 'inclusive count values.')
- parser.add_argument(
- '--benchmark_set_size',
- required=True,
- help='The size of the benchmark sets.')
- parser.add_argument(
- '--benchmark_set_output_file',
- required=True,
- help='The JSON output file containing optimal benchmark sets with their '
- 'metrics. For every optimal benchmark set, the file contains the list of '
- 'benchmarks, the pair of metrics and a dictionary with the pair of '
- 'metrics for each group covered by the benchmark set.')
- parser.add_argument(
- '--metric',
- required=True,
- help='The metric used to select the optimal benchmark set. The possible '
- 'values are: distance_variation, function_count and score_fraction.')
- parser.add_argument(
- '--cwp_function_groups_file',
- required=True,
-    help='The file that contains the CWP function groups. A line consists of '
- 'the group name and a file path describing the group. A group must '
- 'represent a Chrome OS component.')
-
- options = parser.parse_args(arguments)
-
- return options
-
-
-def Main(argv):
- options = ParseArguments(argv)
- benchmark_set = BenchmarkSet(options.benchmark_set_size,
- options.benchmark_set_output_file,
- options.benchmark_set_common_functions_path,
- options.cwp_inclusive_count_file,
- options.cwp_function_groups_file, options.metric)
- benchmark_set.SelectOptimalBenchmarkSet()
-
-
-if __name__ == '__main__':
- Main(sys.argv[1:])
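For the function_count metric described in the module docstring, the selection reduces to enumerating all benchmark combinations of the requested size and keeping the ones that cover the most unique functions. A stripped-down sketch, assuming each benchmark is represented as a set of function keys (data shapes simplified for illustration):

    import itertools

    def select_by_function_count(benchmark_functions, set_size):
      # benchmark_functions maps a benchmark name to the set of function keys
      # it covers; return the combinations covering the most unique functions.
      best_sets, best_count = [], -1
      for combo in itertools.combinations(sorted(benchmark_functions), set_size):
        covered = set().union(*(benchmark_functions[b] for b in combo))
        if len(covered) > best_count:
          best_sets, best_count = [combo], len(covered)
        elif len(covered) == best_count:
          best_sets.append(combo)
      return best_sets, best_count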
diff --git a/user_activity_benchmarks/symbolize_profiles.sh b/user_activity_benchmarks/symbolize_profiles.sh
deleted file mode 100755
index 904cc1ba..00000000
--- a/user_activity_benchmarks/symbolize_profiles.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Uses local_cwp to do the profile symbolization.
-# The profiles that need to be symbolized are placed in the profiles_path.
-# The results are placed in the local_cwp_results_path.
-
-set -e
-
-if [ "$#" -ne 3 ]; then
- echo "USAGE: symbolize_profiles.sh profiles_path local_cwp_binary_path " \
- "local_cwp_results_path"
- exit 1
-fi
-
-readonly PROFILES_PATH=$1
-readonly LOCAL_CWP_BINARY_PATH=$2
-readonly LOCAL_CWP_RESULTS_PATH=$3
-readonly PROFILES=($(ls "$PROFILES_PATH"))
-
-for profile in "${PROFILES[@]}"
-do
-  if ! $LOCAL_CWP_BINARY_PATH \
-    --output="$LOCAL_CWP_RESULTS_PATH/${profile}.pb.gz" \
-    "$PROFILES_PATH/$profile"; then
-    echo "Failed to symbolize the perf profile output with local_cwp for " \
-      "$profile."
-    continue
-  fi
-done
diff --git a/user_activity_benchmarks/telemetry_benchmarks_R52_8350.68 b/user_activity_benchmarks/telemetry_benchmarks_R52_8350.68
deleted file mode 100644
index 0177dabf..00000000
--- a/user_activity_benchmarks/telemetry_benchmarks_R52_8350.68
+++ /dev/null
@@ -1,113 +0,0 @@
-blink_perf.bindings
-blink_perf.canvas
-blink_perf.css
-blink_perf.dom
-blink_perf.events
-blink_perf.layout
-blink_perf.paint
-blink_perf.parser
-blink_perf.shadow_dom
-blink_perf.svg
-blink_style.top_25
-blob_storage.blob_storage
-dromaeo.cssqueryjquery
-dromaeo.domcoreattr
-dromaeo.domcoremodify
-dromaeo.domcorequery
-dromaeo.domcoretraverse
-dromaeo.jslibattrjquery
-dromaeo.jslibattrprototype
-dromaeo.jslibeventjquery
-dromaeo.jslibeventprototype
-dromaeo.jslibmodifyjquery
-dromaeo.jslibmodifyprototype
-dromaeo.jslibstylejquery
-dromaeo.jslibstyleprototype
-dromaeo.jslibtraversejquery
-dromaeo.jslibtraverseprototype
-dummy_benchmark.noisy_benchmark_1
-dummy_benchmark.stable_benchmark_1
-image_decoding.image_decoding_measurement
-indexeddb_perf
-jetstream
-jitter
-kraken
-media.chromeOS4kOnly.tough_video_cases
-media.chromeOS.tough_video_cases
-media.media_cns_cases
-media.mse_cases
-media.tough_video_cases_extra
-media.tough_video_cases
-memory.long_running_idle_gmail_background_tbmv2
-memory.long_running_idle_gmail_tbmv2
-memory.top_7_stress
-octane
-oilpan_gc_times.tough_animation_cases
-oortonline
-page_cycler.basic_oopif
-page_cycler.intl_hi_ru
-page_cycler.intl_ko_th_vi
-page_cycler_site_isolation.basic_oopif
-page_cycler.typical_25
-page_cycler_v2.basic_oopif
-page_cycler_v2.intl_ar_fa_he
-page_cycler_v2.intl_es_fr_pt-BR
-page_cycler_v2.intl_hi_ru
-page_cycler_v2.intl_ja_zh
-page_cycler_v2.intl_ko_th_vi
-page_cycler_v2_site_isolation.basic_oopif
-page_cycler_v2.top_10_mobile
-page_cycler_v2.typical_25
-rasterize_and_record_micro.key_mobile_sites_smooth
-rasterize_and_record_micro.key_silk_cases
-rasterize_and_record_micro.top_25_smooth
-robohornet_pro
-scheduler.tough_scheduling_cases
-service_worker.service_worker_micro_benchmark
-service_worker.service_worker
-smoothness.gpu_rasterization_and_decoding.image_decoding_cases
-smoothness.gpu_rasterization.tough_filters_cases
-smoothness.gpu_rasterization.tough_path_rendering_cases
-smoothness.image_decoding_cases
-smoothness.key_desktop_move_cases
-smoothness.scrolling_tough_ad_case
-smoothness.scrolling_tough_ad_cases
-smoothness.top_25_smooth
-smoothness.tough_ad_cases
-spaceport
-speedometer-ignition
-speedometer
-startup.cold.blank_page
-startup.large_profile.cold.blank_page
-startup.large_profile.warm.blank_page
-startup.large_profile.warm.blank
-startup.warm.blank_page
-start_with_ext.cold.blank_page
-start_with_ext.warm.blank_page
-storage.indexeddb_endure
-storage.indexeddb_endure_tracing
-sunspider
-system_health.memory_mobile
-tab_switching.five_blank_pages
-tab_switching.top_10
-tab_switching.tough_energy_cases
-tab_switching.tough_image_cases
-tab_switching.typical_25
-thread_times.tough_compositor_cases
-thread_times.tough_scrolling_cases
-top_10_mobile_memory_ignition
-top_10_mobile_memory
-tracing.tracing_with_background_memory_infra
-tracing.tracing_with_debug_overhead
-v8.browsing_mobile
-v8.detached_context_age_in_gc
-v8.google
-v8.infinite_scroll-ignition_tbmv2
-v8.infinite_scroll_tbmv2
-v8.todomvc-ignition
-v8.todomvc
-v8.top_25_smooth
-webrtc.datachannel
-webrtc.getusermedia
-webrtc.peerconnection
-webrtc.webrtc_smoothness
diff --git a/user_activity_benchmarks/testdata/expected/pprof_common/file1.pprof b/user_activity_benchmarks/testdata/expected/pprof_common/file1.pprof
deleted file mode 100644
index 30d4c83a..00000000
--- a/user_activity_benchmarks/testdata/expected/pprof_common/file1.pprof
+++ /dev/null
@@ -1,3 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-blink::ElementV8Internal::getAttributeMethodCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,debug/opt/google/chrome/chrome,30638548,3007599556,0.81%,42.16%,13057167098,3.51%
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,/opt/google/chrome/chrome,21484525,2725201614,0.73%,45.17%,3511333688,0.94% \ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/expected/pprof_common/file2.pprof b/user_activity_benchmarks/testdata/expected/pprof_common/file2.pprof
deleted file mode 100644
index bef92666..00000000
--- a/user_activity_benchmarks/testdata/expected/pprof_common/file2.pprof
+++ /dev/null
@@ -1,2 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-blink::InvalidationSet::invalidatesElement,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/InvalidationSet.cpp,debug/opt/google/chrome/chrome,42293369,4585860529,3.95%,3.95%,13583834527,11.70% \ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/expected/pprof_common/file3.pprof b/user_activity_benchmarks/testdata/expected/pprof_common/file3.pprof
deleted file mode 100644
index 7bac48e3..00000000
--- a/user_activity_benchmarks/testdata/expected/pprof_common/file3.pprof
+++ /dev/null
@@ -1,4 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-SkPackARGB32,/home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h,/opt/google/chrome/chrome,15535764,1628614163,1.64%,27.31%,1633246854,1.64%
-MOZ_Z_adler32,/home/chrome-bot/chrome_root/src/third_party/pdfium/third_party/zlib_v128/adler32.c,/opt/google/chrome/chrome,17825054,1455734663,1.46%,31.79%,1456692596,1.46%
-unpack_ubyte_b8g8r8a8_unorm,/build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c,debug/opt/google/chrome/chrome,19183960,1137455802,1.14%,34.21%,1150209506,1.16% \ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/input/cwp_function_groups.txt b/user_activity_benchmarks/testdata/input/cwp_function_groups.txt
deleted file mode 100644
index 4233d035..00000000
--- a/user_activity_benchmarks/testdata/input/cwp_function_groups.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-ab /a/b
-cd /c/d
-e /e
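This sample groups file is the input consumed by utils.ParseFunctionGroups (defined in utils.py further below); a minimal sketch of the expected result, assuming utils.py is importable from this directory:

import utils

with open('testdata/input/cwp_function_groups.txt') as groups_file:
    groups = utils.ParseFunctionGroups(groups_file.readlines())
print(groups)  # [('ab', '/a/b'), ('cd', '/c/d'), ('e', '/e')]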
diff --git a/user_activity_benchmarks/testdata/input/cwp_functions_file.csv b/user_activity_benchmarks/testdata/input/cwp_functions_file.csv
deleted file mode 100644
index 6c5ed587..00000000
--- a/user_activity_benchmarks/testdata/input/cwp_functions_file.csv
+++ /dev/null
@@ -1,38 +0,0 @@
-function,file,dso,inclusive_count
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,debug/opt/google/chrome/chrome,45766441
-blink::InvalidationSet::invalidatesElement,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/InvalidationSet.cpp,debug/opt/google/chrome/chrome,42293369
-base::MessageLoop::Run,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,debug/opt/google/chrome/chrome,41135127
-blink::StyleInvalidator::RecursionCheckpoint::RecursionCheckpoint,debug/opt/google/chrome/chrome,38403286
-base::MessageLoop::RunTask,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,debug/opt/google/chrome/chrome,38397557
-base::debug::TaskAnnotator::RunTask,/home/chrome-bot/chrome_root/src/base/debug/task_annotator.cc,debug/opt/google/chrome/chrome,38322520
-WTF::HashTableConstIterator::skipEmptyBuckets,debug/opt/google/chrome/chrome,34950293
-unpack_ubyte_b8g8r8a8_unorm /build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c,debug/opt/google/chrome/chrome,34486616
-base::internal::RunnableAdapter::Run,/home/chrome-bot/chrome_root/src/base/bind_internal.h,debug/opt/google/chrome/chrome,34281237
-blink::Element::hasID /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/dom/Element.h,debug/opt/google/chrome/chrome,34237955
-blink::ElementV8Internal::idAttributeGetterCallback,debug/opt/google/chrome/chrome,32481250
-_start,,debug/opt/google/chrome/chrome,32451253
-__libc_start_main,/var/tmp/portage/cross-x86_64-cros-linux-gnu/glibc-2.19-r9/work/glibc-2.19/csu/libc-start.c,debug/lib64/libc-2.19.so,32124944
-blink::ElementV8Internal::getAttributeMethodCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,debug/opt/google/chrome/chrome,30638548
-sha_transform /mnt/host/source/src/third_party/kernel/v3.10/lib/sha1.c,debug/opt/google/chrome/chrome,30615551
-ChromeMain,/home/chrome-bot/chrome_root/src/chrome/app/chrome_main.cc,debug/opt/google/chrome/chrome,30595408
-__clone,sysdeps/unix/sysv/linux/x86_64/clone.S,debug/lib64/libc-2.19.so,25480585
-start_thread,/var/tmp/portage/cross-x86_64-cros-linux-gnu/glibc-2.19-r9/work/glibc-2.19/nptl/pthread_create.c,debug/lib64/libpthread-2.19.so,24504351
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,/opt/google/chrome/chrome,21484525
-base::(anonymous namespace)::ThreadFunc,/home/chrome-bot/chrome_root/src/base/threading/platform_thread_posix.cc,debug/opt/google/chrome/chrome,20700177
-base::Callback::Run,/home/chrome-bot/chrome_root/src/base/callback.h,/opt/google/chrome/chrome,20455633
-,,//anon,20220979
-SkSwizzle_RB /home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h,debug/opt/google/chrome/chrome,19673187
-base::MessageLoop::Run,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,/opt/google/chrome/chrome,19247788
-scheduler::TaskQueueManager::DoWork,/home/chrome-bot/chrome_root/src/components/scheduler/base/task_queue_manager.cc,debug/opt/google/chrome/chrome,19207528
-unpack_ubyte_b8g8r8a8_unorm,/build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c,debug/opt/google/chrome/chrome,19183960
-scheduler::TaskQueueManager::ProcessTaskFromWorkQueue,/home/chrome-bot/chrome_root/src/components/scheduler/base/task_queue_manager.cc,debug/opt/google/chrome/chrome,18975400
-base::MessageLoop::DeferOrRunPendingTask,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,/opt/google/chrome/chrome,17864182
-,[anon],100011
-blink::DocumentV8Internal::getElementByIdMethodCallbackForMainWorld /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Document.cpp,/opt/google/chrome/chrome,17862466
-MOZ_Z_adler32,/home/chrome-bot/chrome_root/src/third_party/pdfium/third_party/zlib_v128/adler32.c,/opt/google/chrome/chrome,17825054
-base::internal::Invoker::Run,/home/chrome-bot/chrome_root/src/base/bind_internal.h,/opt/google/chrome/chrome,16438965
-base::MessageLoop::DoWork,/home/chrome-bot/chrome_root/src/base/message_loop/message_loop.cc,/opt/google/chrome/chrome,16029394
-base::internal::InvokeHelper::MakeItSo,/home/chrome-bot/chrome_root/src/base/bind_internal.h,/opt/google/chrome/chrome,15569953
-SkPackARGB32,/home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h,/opt/google/chrome/chrome,15535764
-base::Thread::ThreadMain,/home/chrome-bot/chrome_root/src/base/threading/thread.cc,debug/opt/google/chrome/chrome,15094458
-_start,,/opt/google/chrome/chrome,15014598
diff --git a/user_activity_benchmarks/testdata/input/inclusive_count_reference.csv b/user_activity_benchmarks/testdata/input/inclusive_count_reference.csv
deleted file mode 100644
index bc0cca6c..00000000
--- a/user_activity_benchmarks/testdata/input/inclusive_count_reference.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-function,file,dso,inclusive_count,inclusive_count_fraction
-func_f,/a/b/file_f,f,1,1
-func_g,/a/b/file_g,g,2,2
-func_g,/a/b/../../a/b/file_g,g,3,2.4
-func_h,/c/d/file_h,h,4,3
-func_i,/c/d/file_i,i,5,4
-func_j,/e/file_j,j,6,5
-func_l,/e/file_l,l,7,6
diff --git a/user_activity_benchmarks/testdata/input/inclusive_count_test.csv b/user_activity_benchmarks/testdata/input/inclusive_count_test.csv
deleted file mode 100644
index c9938276..00000000
--- a/user_activity_benchmarks/testdata/input/inclusive_count_test.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-function,file,dso,inclusive_count,inclusive_count_fraction
-func_f,/a/b/file_f,f,1,1.1
-func_g,/a/b/file_g,g,2,2.2
-func_f,/a/b/file_f,f,3,1.2
-func_h,/c/d/../../c/d/file_h,h,1,3.3
-func_i,/c/d/file_i,i,5,4.4
-func_j,/e/file_j,j,6,5.5
-func_k,/e/file_k,k,7,6.6
diff --git a/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_reference.csv b/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_reference.csv
deleted file mode 100644
index 7d7a49a1..00000000
--- a/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_reference.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-parent_child_functions,child_function_file,inclusive_count
-func_f;;func_g,/a/../a/b/file_g,0.1
-func_f;;func_h,/c/d/../d/file_h,0.2
-func_f;;func_i,/c/d/file_i,0.3
-func_g;;func_j,/e/file_j,0.4
diff --git a/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_test.csv b/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_test.csv
deleted file mode 100644
index a3fb72f5..00000000
--- a/user_activity_benchmarks/testdata/input/pairwise_inclusive_count_test.csv
+++ /dev/null
@@ -1,6 +0,0 @@
-parent_child_functions,child_function_file,inclusive_count
-func_f;;func_g,/a/b/file_g2,0.01
-func_f;;func_h,/c/../c/d/file_h,0.02
-func_f;;func_i,/c/../c/d/file_i,0.03
-func_g;;func_j,/e/file_j,0.4
-func_g;;func_m,/e/file_m,0.6
diff --git a/user_activity_benchmarks/testdata/input/parse_cwp_statistics.csv b/user_activity_benchmarks/testdata/input/parse_cwp_statistics.csv
deleted file mode 100644
index a4c7ced9..00000000
--- a/user_activity_benchmarks/testdata/input/parse_cwp_statistics.csv
+++ /dev/null
@@ -1,6 +0,0 @@
-function,file,dso,inclusive_count
-dummy_method1,dummy_file1/a/b/../../,dummy_object1,1
-dummy_method2,dummy_file2//,dummy_object2,2
-,,321223321,1
-dummy_method3,dummy_file3/a/../,dummy_object3,3
-dummy_method4,dummy_file4/./,dummy_object4,4
diff --git a/user_activity_benchmarks/testdata/input/pprof_top/file1.pprof b/user_activity_benchmarks/testdata/input/pprof_top/file1.pprof
deleted file mode 100644
index 62e327b8..00000000
--- a/user_activity_benchmarks/testdata/input/pprof_top/file1.pprof
+++ /dev/null
@@ -1,20 +0,0 @@
-File: perf
-Build ID: 1000000000
-Type: instructions_event
-Showing nodes accounting for 239632475284, 64.41% of 372058624378 total
-Dropped 33979 nodes (cum <= 1860293121)
- flat flat% sum% cum cum%
- 115734836217 31.11% 31.11% 329503350629 88.56% [anon]
- 9839378797 2.64% 33.75% 14384869492 3.87% blink::v8StringToWebCoreString /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp
- 6054608957 1.63% 35.38% 8069380147 2.17% v8::Object::GetAlignedPointerFromInternalField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
- 4651723038 1.25% 36.63% 8205985387 2.21% blink::ElementV8Internal::idAttributeGetterCallback /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp
- 4569044106 1.23% 37.86% 6408862507 1.72% blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp
- 3354819815 0.9% 38.76% 3361796139 0.9% v8::internal::Internals::ReadField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
- 3277220829 0.88% 39.64% 14077115947 3.78% blink::DocumentV8Internal::getElementByIdMethodCallbackForMainWorld /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Document.cpp
- 3225711531 0.87% 40.51% 3228415743 0.87% v8::internal::Internals::HasHeapObjectTag /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
- 3139339048 0.84% 41.35% 3144663928 0.85% v8::internal::Bitmap::MarkBitFromIndex /home/chrome-bot/chrome_root/src/v8/src/heap/spaces.h (inline)
- 3007599556 0.81% 42.16% 13057167098 3.51% blink::ElementV8Internal::getAttributeMethodCallback /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp
- 2907238921 0.78% 42.94% 2930031660 0.79% v8::base::NoBarrier_Load /home/chrome-bot/chrome_root/src/v8/src/base/atomicops_internals_x86_gcc.h (inline)
- 2791274646 0.75% 43.69% 11058283504 2.97% v8::internal::MarkCompactMarkingVisitor::VisitUnmarkedObjects /home/chrome-bot/chrome_root/src/v8/src/heap/mark-compact.cc (inline)
- 2786321388 0.75% 44.44% 2794002850 0.75% WTF::hashInt /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/HashFunctions.h (inline)
- 2725201614 0.73% 45.17% 3511333688 0.94% base::RunLoop::Run /home/chrome-bot/chrome_root/src/base/run_loop.cc
diff --git a/user_activity_benchmarks/testdata/input/pprof_top/file2.pprof b/user_activity_benchmarks/testdata/input/pprof_top/file2.pprof
deleted file mode 100644
index 6d22bff3..00000000
--- a/user_activity_benchmarks/testdata/input/pprof_top/file2.pprof
+++ /dev/null
@@ -1,17 +0,0 @@
-File: perf
-Build ID: 1000000000
-Type: instructions_event
-Showing nodes accounting for 48939666671, 42.14% of 116136877744 total
-Dropped 35196 nodes (cum <= 580684388)
- flat flat% sum% cum cum%
- 4585860529 3.95% 3.95% 13583834527 11.70% blink::InvalidationSet::invalidatesElement /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/a/b/../../InvalidationSet.cpp
- 3791928512 3.27% 7.21% 35145646088 30.26% blink::StyleInvalidator::invalidate /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.cpp
- 2871318565 2.47% 9.69% 2979878602 2.57% blink::StyleInvalidator::RecursionCheckpoint::~RecursionCheckpoint /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.h (inline)
- 1914657964 1.65% 11.33% 2164475253 1.86% WTF::StringImpl::lower /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/text/StringImpl.cpp
- 1841071698 1.59% 12.92% 13112332809 11.29% blink::StyleInvalidator::RecursionData::matchesCurrentInvalidationSets /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.cpp (inline)
- 1825142681 1.57% 14.49% 1828134467 1.57% blink::StyleInvalidator::RecursionCheckpoint::RecursionCheckpoint /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.h (inline)
- 1727655605 1.49% 15.98% 1925839708 1.66% blink::Element::hasID /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/dom/Element.h (inline)
- 1548329435 1.33% 17.31% 14927333582 12.85% blink::StyleInvalidator::checkInvalidationSetsAgainstElement /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/StyleInvalidator.cpp (inline)
- 1429307046 1.23% 18.54% 1931177544 1.66% WTF::HashTableConstIterator::skipEmptyBuckets /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/HashTable.h
- 1298665649 1.12% 19.66% 4872203383 4.20% blink::SelectorChecker::matchSelector /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/SelectorChecker.cpp
- 1241347773 1.07% 20.73% 88048746121 75.81% [anon]
diff --git a/user_activity_benchmarks/testdata/input/pprof_top/file3.pprof b/user_activity_benchmarks/testdata/input/pprof_top/file3.pprof
deleted file mode 100644
index 6cbf1247..00000000
--- a/user_activity_benchmarks/testdata/input/pprof_top/file3.pprof
+++ /dev/null
@@ -1,21 +0,0 @@
-File: perf
-Build ID: 1000000000
-Type: instructions_event
-Showing nodes accounting for 53216795676, 53.50% of 99475025143 total
-Dropped 39931 nodes (cum <= 497375125)
- flat flat% sum% cum cum%
- 6447071173 6.48% 6.48% 6461127774 6.50% s_mpv_mul_add_vec64 mpi/mpi_amd64_gas.s
- 5026798026 5.05% 11.53% 5033673091 5.06% SkMulDiv255Round /home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkMath.h (inline)
- 3520577246 3.54% 15.07% 4431002672 4.45% wk_png_write_find_filter /home/chrome-bot/chrome_root/src/third_party/libpng/pngwutil.c
- 2907776944 2.92% 18.00% 3738572984 3.76% __memcpy_sse2_unaligned ../sysdeps/x86_64/multiarch/memcpy-sse2-unaligned.S
- 2632046464 2.65% 20.64% 2636062338 2.65% longest_match /home/chrome-bot/chrome_root/src/third_party/zlib/deflate.c (inline)
- 1699966816 1.71% 22.35% 1699966816 1.71% _mm_set_epi32 /usr/lib/gcc/x86_64-cros-linux-gnu/4.9.x/include/emmintrin.h (inline)
- 1669101893 1.68% 24.03% 1673814801 1.68% s_mp_sqr_comba_16 /build/gnawty/tmp/portage/dev-libs/nss-3.23-r1/work/nss-3.23/nss-.amd64/lib/freebl/mpi/mp_comba.c
- 1634108599 1.64% 25.67% 4636591817 4.66% convert32_row /home/chrome-bot/chrome_root/src/third_party/skia/src/core/SkConfig8888.cpp
- 1628614163 1.64% 27.31% 1633246854 1.64% SkPackARGB32 /home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h (inline)
- 1541044177 1.55% 28.86% 3001680713 3.02% convert32 /home/chrome-bot/chrome_root/src/third_party/skia/src/core/SkConfig8888.cpp (inline)
- 1458290775 1.47% 30.32% 1459976296 1.47% SkSwizzle_RB /home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h (inline)
- 1455734663 1.46% 31.79% 1456692596 1.46% MOZ_Z_adler32 /home/chrome-bot/chrome_root/src/third_party/pdfium/third_party/zlib_v128/adler32.c
- 1272700545 1.28% 33.07% 1858067219 1.87% sha_transform /mnt/host/source/src/third_party/kernel/v3.10/lib/sha1.c
- 1137455802 1.14% 34.21% 1150209506 1.16% unpack_ubyte_b8g8r8a8_unorm /build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c (inline)
- 1036731662 1.04% 35.25% 32561535338 32.73% [anon]
diff --git a/user_activity_benchmarks/testdata/input/pprof_top_csv/file1.csv b/user_activity_benchmarks/testdata/input/pprof_top_csv/file1.csv
deleted file mode 100644
index 67af7248..00000000
--- a/user_activity_benchmarks/testdata/input/pprof_top_csv/file1.csv
+++ /dev/null
@@ -1,15 +0,0 @@
-function,file,flat,flat_p,sum_p,cum,cum_p
-v8::internal::Bitmap::MarkBitFromIndex,/home/chrome-bot/chrome_root/src/v8/src/heap/spaces.h,3139339048,0.0084,0.4135,3144663928,0.0085
-v8::base::NoBarrier_Load,/home/chrome-bot/chrome_root/src/v8/src/base/atomicops_internals_x86_gcc.h,2907238921,0.0078,0.4294,2930031660,0.0079
-v8::Object::GetAlignedPointerFromInternalField,/home/chrome-bot/chrome_root/src/v8/include/v8.h,6054608957,0.0163,0.3538,8069380147,0.0217
-[anon],,115734836217,0.3111,0.3111,329503350629,0.8856
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,2725201614,0.0073,0.4517,3511333688,0.0094
-WTF::hashInt,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/HashFunctions.h,2786321388,0.0075,0.4444,2794002850,0.0075
-blink::ElementV8Internal::idAttributeGetterCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,4651723038,0.0125,0.3663,8205985387,0.0221
-v8::internal::Internals::ReadField,/home/chrome-bot/chrome_root/src/v8/include/v8.h,3354819815,0.009,0.3876,3361796139,0.009
-blink::v8StringToWebCoreString,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp,9839378797,0.0264,0.3375,14384869492,0.0387
-blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp,4569044106,0.0123,0.3786,6408862507,0.0172
-blink::DocumentV8Internal::getElementByIdMethodCallbackForMainWorld,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Document.cpp,3277220829,0.0088,0.3964,14077115947,0.0378
-v8::internal::MarkCompactMarkingVisitor::VisitUnmarkedObjects,/home/chrome-bot/chrome_root/src/v8/src/heap/mark-compact.cc,2791274646,0.0075,0.4369,11058283504,0.0297
-blink::ElementV8Internal::getAttributeMethodCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,3007599556,0.0081,0.4216,13057167098,0.0351
-v8::internal::Internals::HasHeapObjectTag,/home/chrome-bot/chrome_root/src/v8/include/v8.h,3225711531,0.0087,0.4051,3228415743,0.0087
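This CSV lists the expected parse of the pprof --top text above; a minimal sketch of the lookup, assuming utils.py (further below) is importable from this directory. The printed tuple matches the base::RunLoop::Run row of the CSV, per the unit tests at the end of this change.

import utils

top_stats = utils.ParsePprofTopOutput('testdata/input/pprof_top/file1.pprof')
key = 'base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc'
print(top_stats[key])
# ('2725201614', '0.0073', '0.4517', '3511333688', '0.0094')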
diff --git a/user_activity_benchmarks/testdata/input/pprof_tree/file1.pprof b/user_activity_benchmarks/testdata/input/pprof_tree/file1.pprof
deleted file mode 100644
index 69b5606d..00000000
--- a/user_activity_benchmarks/testdata/input/pprof_tree/file1.pprof
+++ /dev/null
@@ -1,29 +0,0 @@
-File: perf
-Build ID: 37750b32016528ac896fc238e0d00513e218fd9e
-Type: instructions_event
-Showing nodes accounting for 234768811461, 63.10% of 372058624378 total
-Dropped 33979 nodes (cum <= 1860293121)
-Showing top 80 nodes out of 271
-----------------------------------------------------------+-------------
- flat flat% sum% cum cum% calls calls% + context
-----------------------------------------------------------+-------------
- 13412390629 93.24% | blink::V8StringResource::toString /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.h
- 437497332 3.04% | [anon]
- 378465996 2.63% | blink::V8StringResource::operator WTF::AtomicString /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.h
-9839378797 2.64% 33.75% 14384869492 3.87% | blink::v8StringToWebCoreString /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp
- 3180428647 22.11% | v8::String::GetExternalStringResourceBase /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
- 514301458 3.58% | WTF::RefPtr::RefPtr /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/RefPtr.h (inline)
-----------------------------------------------------------+-------------
- 8205985387 100% | [anon]
-4651723038 1.25% 36.63% 8205985387 2.21% | blink::ElementV8Internal::idAttributeGetterCallback /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp
- 717786059 8.75% | v8::Object::GetAlignedPointerFromInternalField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-----------------------------------------------------------+-------------
- 6408862507 100% | [anon]
-4569044106 1.23% 37.86% 6408862507 1.72% | blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld /var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp
- 773479621 12.07% | v8::Object::GetAlignedPointerFromInternalField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
- 690710254 10.78% | blink::v8SetReturnValueForMainWorld /home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8Binding.h (inline)
-----------------------------------------------------------+-------------
- 2005371070 59.65% | v8::Object::GetAlignedPointerFromInternalField /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
- 954968101 28.41% | v8::String::GetExternalStringResourceBase /home/chrome-bot/chrome_root/src/v8/include/v8.h (inline)
-3354819815 0.9% 38.76% 3361796139 0.9% | v8::internal::Internals::ReadField /home/chrome-bot/chrome_root/src/v8/include/v8.h
-----------------------------------------------------------+-------------
diff --git a/user_activity_benchmarks/testdata/input/pprof_tree_csv/file1.csv b/user_activity_benchmarks/testdata/input/pprof_tree_csv/file1.csv
deleted file mode 100644
index 9b155614..00000000
--- a/user_activity_benchmarks/testdata/input/pprof_tree_csv/file1.csv
+++ /dev/null
@@ -1,6 +0,0 @@
-parent_function,parent_function_file,child_function,child_function_file,inclusive_count_fraction
-blink::ElementV8Internal::idAttributeGetterCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,v8::Object::GetAlignedPointerFromInternalField,/home/chrome-bot/chrome_root/src/v8/include/v8.h,0.0875
-blink::v8StringToWebCoreString,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp,WTF::RefPtr::RefPtr,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/wtf/RefPtr.h,0.0358
-blink::v8StringToWebCoreString,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8StringResource.cpp,v8::String::GetExternalStringResourceBase,/home/chrome-bot/chrome_root/src/v8/include/v8.h,0.2211
-blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp,blink::v8SetReturnValueForMainWorld,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/bindings/core/v8/V8Binding.h,0.10779999999999999
-blink::NodeV8Internal::firstChildAttributeGetterCallbackForMainWorld,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Node.cpp,v8::Object::GetAlignedPointerFromInternalField,/home/chrome-bot/chrome_root/src/v8/include/v8.h,0.1207
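This CSV is the expected parse of the pprof --tree text above; note that 0.10779999999999999 is simply the float value of 10.78 / 100. A minimal lookup sketch, assuming utils.py (further below) is importable from this directory:

import utils

tree_stats = utils.ParsePprofTreeOutput('testdata/input/pprof_tree/file1.pprof')
parent_key = ('blink::v8StringToWebCoreString,'
              '/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/'
              'bindings/core/v8/V8StringResource.cpp')
child_key = ('v8::String::GetExternalStringResourceBase,'
             '/home/chrome-bot/chrome_root/src/v8/include/v8.h')
print(tree_stats[parent_key][child_key])  # 0.2211, matching the row above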
diff --git a/user_activity_benchmarks/testdata/results/pprof_common/file1.pprof b/user_activity_benchmarks/testdata/results/pprof_common/file1.pprof
deleted file mode 100644
index 30d4c83a..00000000
--- a/user_activity_benchmarks/testdata/results/pprof_common/file1.pprof
+++ /dev/null
@@ -1,3 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-blink::ElementV8Internal::getAttributeMethodCallback,/var/cache/chromeos-chrome/chrome-src-internal/src/out_gnawty/Release/gen/blink/bindings/core/v8/V8Element.cpp,debug/opt/google/chrome/chrome,30638548,3007599556,0.81%,42.16%,13057167098,3.51%
-base::RunLoop::Run,/home/chrome-bot/chrome_root/src/base/run_loop.cc,/opt/google/chrome/chrome,21484525,2725201614,0.73%,45.17%,3511333688,0.94% \ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/results/pprof_common/file2.pprof b/user_activity_benchmarks/testdata/results/pprof_common/file2.pprof
deleted file mode 100644
index bef92666..00000000
--- a/user_activity_benchmarks/testdata/results/pprof_common/file2.pprof
+++ /dev/null
@@ -1,2 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-blink::InvalidationSet::invalidatesElement,/home/chrome-bot/chrome_root/src/third_party/WebKit/Source/core/css/invalidation/InvalidationSet.cpp,debug/opt/google/chrome/chrome,42293369,4585860529,3.95%,3.95%,13583834527,11.70% \ No newline at end of file
diff --git a/user_activity_benchmarks/testdata/results/pprof_common/file3.pprof b/user_activity_benchmarks/testdata/results/pprof_common/file3.pprof
deleted file mode 100644
index 7bac48e3..00000000
--- a/user_activity_benchmarks/testdata/results/pprof_common/file3.pprof
+++ /dev/null
@@ -1,4 +0,0 @@
-function,file,dso,inclusive_count,flat,flat%,sum%,cum,cum%
-SkPackARGB32,/home/chrome-bot/chrome_root/src/third_party/skia/include/core/SkColorPriv.h,/opt/google/chrome/chrome,15535764,1628614163,1.64%,27.31%,1633246854,1.64%
-MOZ_Z_adler32,/home/chrome-bot/chrome_root/src/third_party/pdfium/third_party/zlib_v128/adler32.c,/opt/google/chrome/chrome,17825054,1455734663,1.46%,31.79%,1456692596,1.46%
-unpack_ubyte_b8g8r8a8_unorm,/build/gnawty/tmp/portage/media-libs/mesa-11.3.0-r14/work/Mesa-11.3.0/src/mesa/main/format_unpack.c,debug/opt/google/chrome/chrome,19183960,1137455802,1.14%,34.21%,1150209506,1.16% \ No newline at end of file
diff --git a/user_activity_benchmarks/utils.py b/user_activity_benchmarks/utils.py
deleted file mode 100644
index 009b241a..00000000
--- a/user_activity_benchmarks/utils.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Utility functions for parsing pprof, CWP data and Chrome OS groups files."""
-
-from collections import defaultdict
-
-import csv
-import os
-import re
-
-SEPARATOR_REGEX = re.compile(r'-+\+-+')
-FUNCTION_STATISTIC_REGEX = \
- re.compile(r'(\S+)\s+(\S+)%\s+(\S+)%\s+(\S+)\s+(\S+)%')
-CHILD_FUNCTION_PERCENTAGE_REGEX = re.compile(r'([0-9.]+)%')
-FUNCTION_KEY_SEPARATOR_REGEX = re.compile(r'\|\s+')
-# Constants used to identify if a function is common in the pprof and CWP
-# files.
-COMMON_FUNCTION = 'common'
-EXTRA_FUNCTION = 'extra'
-PARENT_CHILD_FUNCTIONS_SEPARATOR = ';;'
-# List of pairs of strings used to make substitutions in file names to make
-# CWP and pprof data consistent.
-FILE_NAME_REPLACING_PAIR_STRINGS = [('gnawty', 'BOARD'),
- ('amd64-generic', 'BOARD'),
- (' ../sysdeps', ',sysdeps'),
- (' ../nptl', ',nptl'),
- (' aes-x86_64.s', ',aes-x86_64.s'),
- (' (inline)', ''),
- (' (partial-inline)', ''),
- (' ../', ','),
- ('../', '')]
-# Separator used to delimit the function from the file name.
-FUNCTION_FILE_SEPARATOR = ' /'
-
-
-def MakeCWPAndPprofFileNamesConsistent(file_name):
- """Makes the CWP and pprof file names consistent.
-
- For the same function, some file paths may differ slightly in the CWP data
- compared to the pprof output. In a file name, for each tuple element of
- FILE_NAME_REPLACING_PAIR_STRINGS, we substitute the first element with the
- second one.
-
- Args:
- file_name: A string representing the name of the file.
-
- Returns:
- A string representing the modified name of the file.
- """
- file_name = file_name.replace(', ', '; ')
- for replacing_pair_string in FILE_NAME_REPLACING_PAIR_STRINGS:
- file_name = file_name.replace(replacing_pair_string[0],
- replacing_pair_string[1])
-
- return file_name
-
-def MakePprofFunctionKey(function_and_file_name):
- """Creates the function key from the function and file name.
-
- Parsing the pprof --top and --tree outputs is difficult because it is hard
- to extract the function and file name (i.e. the function names can contain
- unexpected characters such as spaces, operators etc.).
- For the moment, we use FUNCTION_FILE_SEPARATOR as the delimiter between the
- function and the file name. However, there are some cases where the file name
- does not start with / and we treat these cases separately (i.e. ../sysdeps,
- ../nptl, aes-x86_64.s).
-
- Args:
- function_and_file_name: A string representing the function and the file name
- as it appears in the pprof output.
-
- Returns:
- A string representing the function key, composed from the function and file
- name, comma separated.
- """
- # TODO(evelinad): Use pprof --topproto instead of pprof --top to parse
- # protobuffers instead of text output. Investigate if there is an equivalent
- # for pprof --tree that gives protobuffer output.
- #
- # In the CWP output, we replace the , with ; as a workaround for parsing
- # csv files. We do the same for the pprof output.
- #
- # TODO(evelinad): Use dremel --csv_dialect=excel-tab in the queries for
- # replacing the , delimiter with tab.
- function_and_file_name = function_and_file_name.replace(', ', '; ')
- # If the function and file name sequence contains the FUNCTION_FILE_SEPARATOR,
- # we normalize the path name of the file and make the string substitutions
- # to make the CWP and pprof data consistent. The returned key is composed
- # from the function name and normalized file path name, separated by a comma.
- # If the function and file name does not contain the FUNCTION_FILE_SEPARATOR,
- # we just do the strings substitution.
- if FUNCTION_FILE_SEPARATOR in function_and_file_name:
- function_name, file_name = \
- function_and_file_name.split(FUNCTION_FILE_SEPARATOR)
- file_name = \
- MakeCWPAndPprofFileNamesConsistent(os.path.normpath("/" + file_name))
- return ','.join([function_name, file_name])
-
- return MakeCWPAndPprofFileNamesConsistent(function_and_file_name)
-
-
-def ComputeCWPCummulativeInclusiveStatistics(cwp_inclusive_count_statistics):
- """Computes the cumulative inclusive count value of a function.
-
- A function might appear declared in multiple files or objects. When
- computing the fraction of the inclusive count value from a child function to
- the parent function, we take into consideration the sum of the inclusive
- count values from all the occurrences of that function.
-
- Args:
- cwp_inclusive_count_statistics: A dict containing the inclusive count
- statistics extracted by the ParseCWPInclusiveCountFile method.
-
- Returns:
- A dict having as a key the name of the function and as a value the sum of
- the inclusive count values of the occurrences of the function from all
- the files and objects.
- """
- cwp_inclusive_count_statistics_cumulative = defaultdict(int)
-
- for function_key, function_statistics \
- in cwp_inclusive_count_statistics.iteritems():
- function_name, _ = function_key.split(',')
- cwp_inclusive_count_statistics_cumulative[function_name] += \
- function_statistics[1]
-
- return cwp_inclusive_count_statistics_cumulative
-
-def ComputeCWPChildFunctionsFractions(cwp_inclusive_count_statistics_cumulative,
- cwp_pairwise_inclusive_count_statistics):
- """Computes the fractions of the inclusive count values for child functions.
-
- The fraction represents the inclusive count value of a child function over
- the one of the parent function.
-
- Args:
- cwp_inclusive_count_statistics_cumulative: A dict containing the
- cumulative inclusive count values of the CWP functions.
- cwp_pairwise_inclusive_count_statistics: A dict containing the inclusive
- count statistics for pairs of parent and child functions. The key is the
- parent function. The value is a dict with the key the name of the child
- function and the file name, comma separated, and the value is the
- inclusive count value of the pair of parent and child functions.
-
- Returns:
- A dict containing the inclusive count statistics for pairs of parent
- and child functions. The key is the parent function. The value is a
- dict with the key the name of the child function and the file name,
- comma separated, and the value is the inclusive count fraction of the
- child function out of the parent function.
- """
-
- pairwise_inclusive_count_fractions = {}
-
- for parent_function_key, child_functions_metrics in \
- cwp_pairwise_inclusive_count_statistics.iteritems():
- child_functions_fractions = {}
- parent_function_inclusive_count = \
- cwp_inclusive_count_statistics_cumulative.get(parent_function_key, 0.0)
-
- if parent_function_key in cwp_inclusive_count_statistics_cumulative:
- for child_function_key, child_function_inclusive_count \
- in child_functions_metrics.iteritems():
- child_functions_fractions[child_function_key] = \
- child_function_inclusive_count / parent_function_inclusive_count
- else:
- for child_function_key, child_function_inclusive_count \
- in child_functions_metrics.iteritems():
- child_functions_fractions[child_function_key] = 0.0
- pairwise_inclusive_count_fractions[parent_function_key] = \
- child_functions_fractions
-
- return pairwise_inclusive_count_fractions
-
-def ParseFunctionGroups(cwp_function_groups_lines):
- """Parses the contents of the function groups file.
-
- Args:
- cwp_function_groups_lines: A list of the lines contained in the CWP
- function groups file. A line contains the group name and the file path
- that describes the group, separated by a space.
-
- Returns:
- A list of tuples containing the group name and the file path.
- """
- # The order of the groups mentioned in the cwp_function_groups file
- # matters. A function declared in a file will belong to the first
- # mentioned group that matches its path to the one of the file.
- # It is possible to have multiple paths that belong to the same group.
- return [tuple(line.split()) for line in cwp_function_groups_lines]
-
-
-def ParsePprofTopOutput(file_name):
- """Parses a file that contains the output of the pprof --top command.
-
- Args:
- file_name: The name of the file containing the pprof --top output.
-
- Returns:
- A dict having as a key the name of the function and the file containing
- the declaration of the function, separated by a comma, and as a value
- a tuple containing the flat, flat percentage, sum percentage, cumulative
- and cumulative percentage values.
- """
-
- pprof_top_statistics = {}
-
- # In the pprof top output, the statistics of the functions start from the
- # 6th line.
- with open(file_name) as input_file:
- pprof_top_content = input_file.readlines()[6:]
-
- for line in pprof_top_content:
- function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)
- flat, flat_p, sum_p, cum, cum_p = function_statistic_match.groups()
- flat_p = str(float(flat_p) / 100.0)
- sum_p = str(float(sum_p) / 100.0)
- cum_p = str(float(cum_p) / 100.0)
- lookup_index = function_statistic_match.end()
- function_and_file_name = line[lookup_index + 2 : -1]
- key = MakePprofFunctionKey(function_and_file_name)
- pprof_top_statistics[key] = (flat, flat_p, sum_p, cum, cum_p)
- return pprof_top_statistics
-
-
-def ParsePprofTreeOutput(file_name):
- """Parses a file that contains the output of the pprof --tree command.
-
- Args:
- file_name: The name of the file containing the pprof --tree output.
-
- Returns:
- A dict including the statistics for pairs of parent and child functions.
- The key is the name of the parent function and the file where the
- function is declared, separated by a comma. The value is a dict having as
- a key the name of the child function and the file where the function is
- declared, comma separated, and as a value the percentage of time the
- parent function spends in the child function.
- """
-
- # In the pprof output, the statistics of the functions start from the 9th
- # line.
- with open(file_name) as input_file:
- pprof_tree_content = input_file.readlines()[9:]
-
- pprof_tree_statistics = defaultdict(lambda: defaultdict(float))
- track_child_functions = False
-
- # The statistics of a given function, its parent and child functions are
- # included between two separator marks.
- # All the parent function statistics are above the line containing the
- # statistics of the given function.
- # All the statistics of a child function are below the statistics of the
- # given function.
- # The statistics of a parent or a child function contain the calls, calls
- # percentage, the function name and the file where the function is declared.
- # The statistics of the given function contain the flat, flat percentage,
- sum percentage, cumulative, cumulative percentage, function name and the
- # name of the file containing the declaration of the function.
- for line in pprof_tree_content:
- separator_match = SEPARATOR_REGEX.search(line)
-
- if separator_match:
- track_child_functions = False
- continue
-
- parent_function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)
-
- if parent_function_statistic_match:
- track_child_functions = True
- lookup_index = parent_function_statistic_match.end()
- parent_function_key_match = \
- FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)
- lookup_index = parent_function_key_match.end()
- parent_function_key = MakePprofFunctionKey(line[lookup_index:-1])
- continue
-
- if not track_child_functions:
- continue
-
- child_function_statistic_match = \
- CHILD_FUNCTION_PERCENTAGE_REGEX.search(line)
- child_function_percentage = \
- float(child_function_statistic_match.group(1))
- lookup_index = child_function_statistic_match.end()
- child_function_key_match = \
- FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)
- lookup_index = child_function_key_match.end()
- child_function_key = MakePprofFunctionKey(line[lookup_index:-1])
-
- pprof_tree_statistics[parent_function_key][child_function_key] += \
- child_function_percentage / 100.0
-
- return pprof_tree_statistics
-
-
-def ParseCWPInclusiveCountFile(file_name):
- """Parses the CWP inclusive count files.
-
- A line should contain the name of the function, the file name with the
- declaration, the inclusive count and inclusive count fraction out of the
- total extracted inclusive count values.
-
- Args:
- file_name: The file containing the inclusive count values of the CWP
- functions.
-
- Returns:
- A dict containing the inclusive count statistics. The key is the name of
- the function and the file name, comma separated. The value represents a
- tuple with the object name containing the function declaration, the
- inclusive count and inclusive count fraction values, and a marker to
- identify if the function is present in one of the benchmark profiles.
- """
- cwp_inclusive_count_statistics = defaultdict(lambda: ('', 0, 0.0, 0))
-
- with open(file_name) as input_file:
- statistics_reader = csv.DictReader(input_file, delimiter=',')
- for statistic in statistics_reader:
- function_name = statistic['function']
- file_name = MakeCWPAndPprofFileNamesConsistent(
- os.path.normpath(statistic['file']))
- dso_name = statistic['dso']
- inclusive_count = statistic['inclusive_count']
- inclusive_count_fraction = statistic['inclusive_count_fraction']
-
- # We ignore the lines that have empty fields (i.e. they specify only the
- # addresses of the functions and the inclusive counts values).
- if all([
- function_name, file_name, dso_name, inclusive_count,
- inclusive_count_fraction
- ]):
- key = '%s,%s' % (function_name, file_name)
-
- # There might be situations where a function appears in multiple files
- # or objects. Such situations can occur when the Dremel queries do not
- # specify the Chrome OS version and the name of the board (i.e. the files
- # can belong to different kernel or library versions).
- inclusive_count_sum = \
- cwp_inclusive_count_statistics[key][1] + int(inclusive_count)
- inclusive_count_fraction_sum = \
- cwp_inclusive_count_statistics[key][2] + \
- float(inclusive_count_fraction)
-
- # All the functions are initially marked as EXTRA_FUNCTION.
- value = \
- (dso_name, inclusive_count_sum, inclusive_count_fraction_sum,
- EXTRA_FUNCTION)
- cwp_inclusive_count_statistics[key] = value
-
- return cwp_inclusive_count_statistics
-
-
-def ParseCWPPairwiseInclusiveCountFile(file_name):
- """Parses the CWP pairwise inclusive count files.
-
- A line of the file should contain a pair of a parent and a child function,
- concatenated by the PARENT_CHILD_FUNCTIONS_SEPARATOR, the name of the file
- where the child function is declared and the inclusive count fractions of
- the pair of functions out of the total amount of inclusive count values.
-
- Args:
- file_name: The file containing the pairwise inclusive_count statistics of
- the CWP functions.
-
- Returns:
- A dict containing the statistics of the parent functions and each of
- their child functions. The key of the dict is the name of the parent
- function. The value is a dict having as a key the name of the child
- function with its file name separated by a ',' and as a value the
- inclusive count value of the parent-child function pair.
- """
- pairwise_inclusive_count_statistics = defaultdict(lambda: defaultdict(float))
-
- with open(file_name) as input_file:
- statistics_reader = csv.DictReader(input_file, delimiter=',')
-
- for statistic in statistics_reader:
- parent_function_name, child_function_name = \
- statistic['parent_child_functions'].split(
- PARENT_CHILD_FUNCTIONS_SEPARATOR)
- child_function_file_name = MakeCWPAndPprofFileNamesConsistent(
- os.path.normpath(statistic['child_function_file']))
- inclusive_count = statistic['inclusive_count']
-
- # There might be situations where a child function appears in
- # multiple files or objects. Such situations can occur when the
- # Dremel queries do not specify the Chrome OS version and the
- # name of the board (i.e. the files can belong to different kernel
- # or library versions), when the child function is a template
- # function declared in a header file, or when there are name
- # collisions between multiple executable objects.
- # If a pair of child and parent functions appears multiple times, we
- # add their inclusive count values.
- child_function_key = ','.join(
- [child_function_name, child_function_file_name])
- pairwise_inclusive_count_statistics[parent_function_name] \
- [child_function_key] += float(inclusive_count)
-
- return pairwise_inclusive_count_statistics
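A minimal usage sketch for the inclusive-count parser above, assuming utils.py is importable from this directory and run with Python 2 (per the unit test shebang below). Rows that repeat a function/file key are summed, and relative path segments are normalized, as the unit tests below also check (e.g. the two func_g rows of the reference file collapse to a single entry).

import utils

stats = utils.ParseCWPInclusiveCountFile(
    'testdata/input/inclusive_count_reference.csv')
print(stats['func_h,/c/d/file_h'])  # ('h', 4, 3.0, 'extra')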
diff --git a/user_activity_benchmarks/utils_unittest.py b/user_activity_benchmarks/utils_unittest.py
deleted file mode 100755
index 31bf83d3..00000000
--- a/user_activity_benchmarks/utils_unittest.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/python2
-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Unit tests for the utility module."""
-
-import collections
-import csv
-import unittest
-
-import utils
-
-
-class UtilsTest(unittest.TestCase):
- """Test class for utility module."""
-
- def __init__(self, *args, **kwargs):
- super(UtilsTest, self).__init__(*args, **kwargs)
- self._pprof_top_csv_file = 'testdata/input/pprof_top_csv/file1.csv'
- self._pprof_top_file = 'testdata/input/pprof_top/file1.pprof'
- self._pprof_tree_csv_file = 'testdata/input/pprof_tree_csv/file1.csv'
- self._pprof_tree_file = 'testdata/input/pprof_tree/file1.pprof'
- self._pairwise_inclusive_count_test_file = \
- 'testdata/input/pairwise_inclusive_count_test.csv'
- self._pairwise_inclusive_count_reference_file = \
- 'testdata/input/pairwise_inclusive_count_reference.csv'
- self._inclusive_count_test_file = \
- 'testdata/input/inclusive_count_test.csv'
- self._inclusive_count_reference_file = \
- 'testdata/input/inclusive_count_reference.csv'
-
- def testParseFunctionGroups(self):
- cwp_function_groups_lines = \
- ['group1 /a\n', 'group2 /b\n', 'group3 /c\n', 'group4 /d\n']
- expected_output = [('group1', '/a'), ('group2', '/b'), ('group3', '/c'),
- ('group4', '/d')]
- result = utils.ParseFunctionGroups(cwp_function_groups_lines)
-
- self.assertListEqual(expected_output, result)
-
- def testParsePProfTopOutput(self):
- result_pprof_top_output = utils.ParsePprofTopOutput(self._pprof_top_file)
- expected_pprof_top_output = {}
-
- with open(self._pprof_top_csv_file) as input_file:
- statistics_reader = csv.DictReader(input_file, delimiter=',')
-
- for statistic in statistics_reader:
- if statistic['file']:
- function_key = ','.join([statistic['function'], statistic['file']])
- else:
- function_key = statistic['function']
- expected_pprof_top_output[function_key] = \
- (statistic['flat'], statistic['flat_p'], statistic['sum_p'],
- statistic['cum'], statistic['cum_p'])
-
- self.assertDictEqual(result_pprof_top_output, expected_pprof_top_output)
-
- def testParsePProfTreeOutput(self):
- result_pprof_tree_output = utils.ParsePprofTreeOutput(self._pprof_tree_file)
- expected_pprof_tree_output = collections.defaultdict(dict)
-
- with open(self._pprof_tree_csv_file) as input_file:
- statistics_reader = csv.DictReader(input_file, delimiter=',')
-
- for statistic in statistics_reader:
- parent_function_key = \
- ','.join([statistic['parent_function'],
- statistic['parent_function_file']])
- child_function_key = \
- ','.join([statistic['child_function'],
- statistic['child_function_file']])
-
- expected_pprof_tree_output[parent_function_key][child_function_key] = \
- float(statistic['inclusive_count_fraction'])
-
- self.assertDictEqual(result_pprof_tree_output, expected_pprof_tree_output)
-
- def testParseCWPInclusiveCountFile(self):
- expected_inclusive_statistics_test = \
- {'func_i,/c/d/file_i': ('i', 5, 4.4, utils.EXTRA_FUNCTION),
- 'func_j,/e/file_j': ('j', 6, 5.5, utils.EXTRA_FUNCTION),
- 'func_f,/a/b/file_f': ('f', 4, 2.3, utils.EXTRA_FUNCTION),
- 'func_h,/c/d/file_h': ('h', 1, 3.3, utils.EXTRA_FUNCTION),
- 'func_k,/e/file_k': ('k', 7, 6.6, utils.EXTRA_FUNCTION),
- 'func_g,/a/b/file_g': ('g', 2, 2.2, utils.EXTRA_FUNCTION)}
- expected_inclusive_statistics_reference = \
- {'func_i,/c/d/file_i': ('i', 5, 4.0, utils.EXTRA_FUNCTION),
- 'func_j,/e/file_j': ('j', 6, 5.0, utils.EXTRA_FUNCTION),
- 'func_f,/a/b/file_f': ('f', 1, 1.0, utils.EXTRA_FUNCTION),
- 'func_l,/e/file_l': ('l', 7, 6.0, utils.EXTRA_FUNCTION),
- 'func_h,/c/d/file_h': ('h', 4, 3.0, utils.EXTRA_FUNCTION),
- 'func_g,/a/b/file_g': ('g', 5, 4.4, utils.EXTRA_FUNCTION)}
- result_inclusive_statistics_test = \
- utils.ParseCWPInclusiveCountFile(self._inclusive_count_test_file)
- result_inclusive_statistics_reference = \
- utils.ParseCWPInclusiveCountFile(self._inclusive_count_reference_file)
-
- self.assertDictEqual(result_inclusive_statistics_test,
- expected_inclusive_statistics_test)
- self.assertDictEqual(result_inclusive_statistics_reference,
- expected_inclusive_statistics_reference)
-
- def testParseCWPPairwiseInclusiveCountFile(self):
- expected_pairwise_inclusive_statistics_test = {
- 'func_f': {'func_g,/a/b/file_g2': 0.01,
- 'func_h,/c/d/file_h': 0.02,
- 'func_i,/c/d/file_i': 0.03},
- 'func_g': {'func_j,/e/file_j': 0.4,
- 'func_m,/e/file_m': 0.6}
- }
- expected_pairwise_inclusive_statistics_reference = {
- 'func_f': {'func_g,/a/b/file_g': 0.1,
- 'func_h,/c/d/file_h': 0.2,
- 'func_i,/c/d/file_i': 0.3},
- 'func_g': {'func_j,/e/file_j': 0.4}
- }
- result_pairwise_inclusive_statistics_test = \
- utils.ParseCWPPairwiseInclusiveCountFile(
- self._pairwise_inclusive_count_test_file)
- result_pairwise_inclusive_statistics_reference = \
- utils.ParseCWPPairwiseInclusiveCountFile(
- self._pairwise_inclusive_count_reference_file)
-
- self.assertDictEqual(result_pairwise_inclusive_statistics_test,
- expected_pairwise_inclusive_statistics_test)
- self.assertDictEqual(result_pairwise_inclusive_statistics_reference,
- expected_pairwise_inclusive_statistics_reference)
-
-
-if __name__ == '__main__':
- unittest.main()
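A hypothetical runner for the suite above, in case it is driven from another script rather than by executing utils_unittest.py directly; the tests use relative testdata paths, so this must be run from the user_activity_benchmarks directory.

import unittest

import utils_unittest

suite = unittest.TestLoader().loadTestsFromTestCase(utils_unittest.UtilsTest)
unittest.TextTestRunner(verbosity=2).run(suite)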