author     Zhizhou Yang <zhizhouy@google.com>  2017-08-23 22:47:50 -0700
committer  Zhizhou Yang <zhizhouy@google.com>  2017-08-25 16:22:36 -0700
commit     726ded53c7d2c413b0f1663612836041088d4c21 (patch)
tree       ac427e56689e971bc7f10c63695f768b916ac614
parent     e598690a6debaeab4489e539b78dca4e2cb58d6d (diff)
download   benchmark-726ded53c7d2c413b0f1663612836041088d4c21.tar.gz
Add scripts to generate and collect PGO profile
Added a script, collect_profile.py, which provides a simple way for users to generate and collect PGO profile data for each benchmark. Also added a mechanism in autotest to pull profraw data files to a local directory, which serves as a helper for the profile-collecting script.

collect_profile.py has three main parts:
1) Call run.py with the flags set to -fprofile-generate, so that the benchmark is built with PGO instrumentation and run on the device to generate profraw data.
2) Use the autotest framework to pull the profraw data to a local directory. Since autotest only provides a method that sends one file at a time, all the profraw data on the device is compressed into a single tar file before the transfer.
3) Call the llvm-profdata command locally to convert the profraw data into a format that LLVM can recognize.

The user needs to specify which benchmark to collect for, and may also give the device serial, the remote machine address, and the path where the profdata should be stored.

Test: None.
Change-Id: I4d7ed5f056806eb8099fe2e9e1b912adb7962d33
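For illustration, a minimal sketch (not part of this patch) of how collect_profile.py could be driven end to end. It assumes the command is run from the benchmark suite checkout, that llvm-profdata is on PATH, and that the serial number and local output path shown are placeholders; the flags mirror the argparse options added below.

    #!/usr/bin/python
    # Illustrative sketch only: drive collect_profile.py for one benchmark.
    # 'XXXXXXXX' and '/tmp/profdata' are placeholders, not defaults from the script.
    import subprocess

    subprocess.check_call([
        './collect_profile.py',
        '-b', 'Hwui',               # benchmark to instrument and run
        '-s', 'XXXXXXXX',           # device serial (placeholder)
        '-r', 'localhost',          # machine the ADB device is attached to
        '-d', '/data/local/tmp',    # where profraw data is generated on the DUT
        '-p', '/tmp/profdata',      # local directory for the merged .profdata
    ])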
-rw-r--r--   autotest.diff        212
-rwxr-xr-x   collect_profile.py   139
2 files changed, 349 insertions, 2 deletions
diff --git a/autotest.diff b/autotest.diff
index ef0029ae..95ef2e4d 100644
--- a/autotest.diff
+++ b/autotest.diff
@@ -237,7 +237,7 @@ index 000000000..763864f3a
+parallel_simple(run_dex2oat, machines)
diff --git a/server/site_tests/android_Hwui/android_Hwui.py b/server/site_tests/android_Hwui/android_Hwui.py
new file mode 100644
-index 000000000..d1837e042
+index 000000000..9a1accb09
--- /dev/null
+++ b/server/site_tests/android_Hwui/android_Hwui.py
@@ -0,0 +1,67 @@
@@ -360,7 +360,7 @@ index 000000000..89c47da20
+parallel_simple(run_hwui_test, machines)
diff --git a/server/site_tests/android_Panorama/android_Panorama.py b/server/site_tests/android_Panorama/android_Panorama.py
new file mode 100644
-index 000000000..db2a29cde
+index 000000000..89b2355e5
--- /dev/null
+++ b/server/site_tests/android_Panorama/android_Panorama.py
@@ -0,0 +1,53 @@
@@ -467,6 +467,92 @@ index 000000000..3cd589eed
+ job.run_test("android_Panorama", host=host)
+
+parallel_simple(run_panorama_test, machines)
+diff --git a/server/site_tests/android_Pull/android_Pull.py b/server/site_tests/android_Pull/android_Pull.py
+new file mode 100644
+index 000000000..cff373899
+--- /dev/null
++++ b/server/site_tests/android_Pull/android_Pull.py
+@@ -0,0 +1,30 @@
++# Pull profraw data from the device
++from __future__ import print_function
++
++import bench_config
++
++from autotest_lib.server import test
++
++class android_Pull(test.test):
++ version = 1
++
++ def run_once(self, host=None):
++ self.client = host
++
++ # Tar all the files in the profraw directory
++ tar_file = bench_config.location_DUT + '.tar'
++ raw_cmd = ('tar -cvf {tar_file} {location_DUT}'.format(
++ tar_file=tar_file,
++ location_DUT=bench_config.location_DUT))
++ self.client.run(raw_cmd)
++
++ # Pull tar of profraw data from the device
++ out_dir = bench_config.location
++
++ host.get_file(tar_file, out_dir, delete_dest=True)
++
++ # Remove the data on the device
++ self.client.run('rm %s' % tar_file)
++ self.client.run('rm -rf %s' % bench_config.location_DUT)
++
++ print('Profraw data has been pulled from the device to the local machine.')
+diff --git a/server/site_tests/android_Pull/bench_config.py b/server/site_tests/android_Pull/bench_config.py
+new file mode 100644
+index 000000000..37967c2f9
+--- /dev/null
++++ b/server/site_tests/android_Pull/bench_config.py
+@@ -0,0 +1,19 @@
++#!/usr/bin/python
++import os
++
++home = os.environ["HOME"]
++
++android_home = os.getenv("ANDROID_HOME",
++ default=os.path.join(home,
++ 'android_source/master-googleplex/'))
++bench_suite_dir = os.getenv('BENCH_SUITE_DIR',
++ default=os.path.join(android_home,
++ 'benchtoolchain'))
++
++bench = os.getenv('BENCH', default='Hwui')
++location_DUT = os.getenv('LOCATION_DUT',
++ default=os.path.join('/data/local/tmp',
++ bench + '_profraw'))
++location = os.getenv('LOCATION', default=bench_suite_dir)
++
++product = os.getenv("PRODUCT", default="generic")
+diff --git a/server/site_tests/android_Pull/control b/server/site_tests/android_Pull/control
+new file mode 100644
+index 000000000..7b00df7cb
+--- /dev/null
++++ b/server/site_tests/android_Pull/control
+@@ -0,0 +1,19 @@
++# Control
++
++NAME = "Pull"
++AUTHOR = "Zhizhou Yang"
++ATTRIBUTES = "suite:android_toolchain_benchmark"
++TIME = "MEDIUM"
++TEST_CATEGORY = "Functional"
++TEST_CLASS = "library"
++TEST_TYPE = "server"
++
++DOC = """
++Pull PGO profraw data generated by a benchmark run from the device to the local machine.
++"""
++
++def run_pull_test(machine):
++ host = hosts.create_host(machine)
++ job.run_test("android_Pull", host=host)
++
++parallel_simple(run_pull_test, machines)
diff --git a/server/site_tests/android_SetDevice/android_SetDevice.py b/server/site_tests/android_SetDevice/android_SetDevice.py
new file mode 100644
index 000000000..7a7134d58
@@ -800,6 +886,128 @@ index 000000000..144766351
+ job.run_test("android_Synthmark", host=host)
+
+parallel_simple(run_synthmark_test, machines)
+diff --git a/site_utils/pull_device.py b/site_utils/pull_device.py
+new file mode 100755
+index 000000000..959c4443d
+--- /dev/null
++++ b/site_utils/pull_device.py
+@@ -0,0 +1,116 @@
++#!/usr/bin/python
++#
++# Script to pull data from android device
++from __future__ import print_function
++
++import argparse
++import common
++import logging
++import os
++import sys
++
++# Turn the logging level to INFO before importing other autotest
++# code, to avoid having failed import logging messages confuse the
++# test_droid user.
++logging.basicConfig(level=logging.INFO)
++
++# Unfortunately, autotest depends on external packages for assorted
++# functionality regardless of whether or not it is needed in a particular
++# context.
++# Since we can't depend on people to import these utilities in any principled
++# way, we dynamically download code before any autotest imports.
++try:
++ import chromite.lib.terminal # pylint: disable=unused-import
++ import django.http # pylint: disable=unused-import
++except ImportError:
++ # Ensure the chromite site-package is installed.
++ import subprocess
++ build_externals_path = os.path.join(
++ os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
++ 'utils', 'build_externals.py')
++ subprocess.check_call([build_externals_path, '--names_to_check',
++ 'chromiterepo', 'django'])
++ # Restart the script so python now finds the autotest site-packages.
++ sys.exit(os.execv(__file__, sys.argv))
++
++from autotest_lib.client.common_lib import utils
++from autotest_lib.server.hosts import adb_host
++from autotest_lib.site_utils import test_runner_utils
++from autotest_lib.site_utils import tester_feedback
++
++def _parse_arguments_internal(argv):
++ """
++ Parse command line arguments
++
++ @param argv: argument list to parse
++
++ @returns: tuple of parsed arguments and argv suitable for remote runs
++
++ @raises SystemExit if arguments are malformed, or required arguments
++ are not present.
++ """
++
++ parser = argparse.ArgumentParser(description='Run remote tests.')
++
++ parser.add_argument('-b', '--bench', metavar='BENCH', required=True,
++ help='Select the benchmark to be run for '
++ 'the test.')
++ parser.add_argument('-s', '--serials', metavar='SERIALS',
++ help='Comma-separated list of device serials under '
++ 'test.')
++ parser.add_argument('-r', '--remote', metavar='REMOTE',
++ default='localhost',
++ help='hostname[:port] if the ADB device is connected '
++ 'to a remote machine. Ensure this workstation '
++ 'is configured for passwordless ssh access as '
++ 'users "root" or "adb"')
++
++ parser.add_argument('-d', '--pathDUT',
++ help='Specify the location to put the file on the DUT.')
++ parser.add_argument('-p', '--path',
++ help='Specify the location to put the file locally.')
++
++ return parser.parse_args(argv)
++
++def main(argv):
++ """
++ Entry point for pull_device script.
++
++ @param argv: arguments list
++ """
++ arguments = _parse_arguments_internal(argv)
++
++ serials = arguments.serials
++ if serials is None:
++ result = utils.run(['adb', 'devices'])
++ devices = adb_host.ADBHost.parse_device_serials(result.stdout)
++ if len(devices) != 1:
++ logging.error('Could not detect exactly one device; please select '
++ 'one with -s: %s', devices)
++ return 1
++ serials = devices[0]
++
++ autotest_path = os.path.dirname(os.path.dirname(
++ os.path.realpath(__file__)))
++ site_utils_path = os.path.join(autotest_path, 'site_utils')
++ realpath = os.path.realpath(__file__)
++ site_utils_path = os.path.realpath(site_utils_path)
++ host_attributes = {'serials': serials,
++ 'os_type': 'android'}
++ results_directory = test_runner_utils.create_results_directory(None)
++
++ os.environ['BENCH'] = arguments.bench
++ os.environ['LOCATION_DUT'] = arguments.pathDUT
++ os.environ['LOCATION'] = arguments.path
++
++ tests = ['Pull']
++
++ if test_runner_utils.perform_run_from_autotest_root(
++ autotest_path, argv, tests, arguments.remote,
++ host_attributes=host_attributes,
++ results_directory=results_directory):
++ logging.error('Error while running on device.')
++ return 1
++
++if __name__ == '__main__':
++ sys.exit(main(sys.argv[1:]))
diff --git a/site_utils/set_device.py b/site_utils/set_device.py
new file mode 100755
index 000000000..abb8a8dcc
diff --git a/collect_profile.py b/collect_profile.py
new file mode 100755
index 00000000..27d5e5d6
--- /dev/null
+++ b/collect_profile.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+
+# Script to generate and collect PGO data based on benchmark
+from __future__ import print_function
+
+import argparse
+import config
+import logging
+import os
+import subprocess
+import sys
+import tempfile
+
+# Turn the logging level to INFO before importing other code, to avoid having
+# failed import logging messages confuse the user.
+logging.basicConfig(level=logging.INFO)
+
+def _parse_arguments_internal(argv):
+ """
+ Parse command line arguments
+
+ @param argv: argument list to parse
+
+ @returns: tuple of parsed arguments and argv suitable for remote runs
+
+ @raises SystemExit if arguments are malformed, or required arguments
+ are not present.
+ """
+
+ parser = argparse.ArgumentParser(description='Run this script to collect '
+ 'PGO data.')
+
+ parser.add_argument('-b', '--bench',
+ help='Select which benchmark to collect profdata for.')
+
+ parser.add_argument('-d', '--pathDUT', default='/data/local/tmp',
+ help='Specify where to generate PGO data on the device, '
+ 'set to /data/local/tmp by default.')
+
+ parser.add_argument('-p', '--path', default=config.bench_suite_dir,
+ help='Specify the location to put the profdata, set '
+ 'to bench_suite_dir by default.')
+
+ parser.add_argument('-s', '--serial',
+ help='Device serial number.')
+
+ parser.add_argument('-r', '--remote', default='localhost',
+ help='hostname[:port] if the ADB device is connected '
+ 'to a remote machine. Ensure this workstation '
+ 'is configured for passwordless ssh access as '
+ 'users "root" or "adb"')
+ return parser.parse_args(argv)
+
+# Call run.py to build the benchmark with -fprofile-generate flags and run it on the DUT.
+def run_suite(bench, serial, remote, pathDUT):
+ logging.info('Build and run instrumented benchmark...')
+ run_cmd = ['./run.py', '-b=' + bench]
+ if serial:
+ run_cmd.append('-s=' + serial)
+ run_cmd.append('-r=' + remote)
+ run_cmd.append('-f=-fprofile-generate=%s' % pathDUT)
+ run_cmd.append('--ldflags=-fprofile-generate=%s' % pathDUT)
+ try:
+ subprocess.check_call(run_cmd)
+ except subprocess.CalledProcessError:
+ logging.error('Error running %s.', run_cmd)
+ raise
+
+# Pull profraw data from the device using the pull_device.py script in autotest site_utils.
+def pull_result(bench, serial, remote, pathDUT, path):
+ logging.info('Pulling profraw data from the device to the local machine')
+ pull_cmd = [os.path.join(config.android_home,
+ config.autotest_dir,
+ 'site_utils/pull_device.py')]
+ pull_cmd.append('-b=' + bench)
+ pull_cmd.append('-r=' + remote)
+ if serial:
+ pull_cmd.append('-s=' + serial)
+ pull_cmd.append('-p=' + path)
+ pull_cmd.append('-d=' + pathDUT)
+ try:
+ subprocess.check_call(pull_cmd)
+ except:
+ logging.error('Error while pulling profraw data.')
+ raise
+
+# Use the llvm-profdata tool to convert profraw data to a format that LLVM
+# can recognize.
+def merge(bench, pathDUT, path):
+ logging.info('Generate profdata for PGO...')
+ # Untar the compressed raw data file collected from the device
+ tmp_dir = tempfile.mkdtemp()
+ untar_cmd = ['tar',
+ '-xf',
+ os.path.join(path, bench + '_profraw.tar'),
+ '-C',
+ tmp_dir]
+
+ # call llvm-profdata to merge the profraw data
+ profdata = os.path.join(path, bench + '.profdata')
+ merge_cmd = ['llvm-profdata',
+ 'merge',
+ '-output=' + profdata,
+ tmp_dir + pathDUT]
+ try:
+ subprocess.check_call(untar_cmd)
+ subprocess.check_call(merge_cmd)
+ logging.info('Profdata is generated successfully, located at %s',
+ profdata)
+ except:
+ logging.error('Error while merging profraw data.')
+ raise
+ finally:
+ subprocess.check_call(['rm', '-rf', tmp_dir])
+
+def main(argv):
+ """
+ Entry point for the collect_profile script.
+
+ @param argv: arguments list
+ """
+ arguments = _parse_arguments_internal(argv)
+
+ bench = arguments.bench
+ serial = arguments.serial
+ path = arguments.path
+ remote = arguments.remote
+
+ # Create a profraw directory to collect data
+ pathDUT = os.path.join(arguments.pathDUT, bench + '_profraw')
+
+ run_suite(bench, serial, remote, pathDUT)
+
+ pull_result(bench, serial, remote, pathDUT, path)
+
+ merge(bench, pathDUT, path)
+
+if __name__ == '__main__':
+ main(sys.argv[1:])