author     android-build-team Robot <android-build-team-robot@google.com>  2017-08-31 08:07:31 +0000
committer  android-build-team Robot <android-build-team-robot@google.com>  2017-08-31 08:07:31 +0000
commit     b40998676a2ca2169e5fd10c1935c1fd9f542eba (patch)
tree       ef6a2089354fb12e0931401da2bfcecdb7a20d3c
parent     48c78d0e2fd61dbc3136ce9dd76e6352f16f0700 (diff)
parent     530c7ff3899052be70a65734bab0d81e72ee5c42 (diff)
download   benchmark-b40998676a2ca2169e5fd10c1935c1fd9f542eba.tar.gz

release-request-48587b2e-b0d9-4e33-ae5f-05bd16bc9d6f-for-git_pi-release-4308806 snap-temp-L91400000098269614

Change-Id: Idfca2de2c84404d9de781304733f5c625cbec22c
-rwxr-xr-x  apply_patches.py     148
-rwxr-xr-x  build_bench.py       299
-rw-r--r--  config.py             14
-rwxr-xr-x  discard_patches.py    64
-rwxr-xr-x  fix_json.py           60
-rwxr-xr-x  fix_skia_results.py  175
-rwxr-xr-x  gen_json.py          100
-rw-r--r--  parse_result.py      168
-rwxr-xr-x  run.py               784
-rw-r--r--  set_flags.py         165
10 files changed, 999 insertions, 978 deletions
diff --git a/apply_patches.py b/apply_patches.py
index ff706f1d..4134ccf6 100755
--- a/apply_patches.py
+++ b/apply_patches.py
@@ -17,90 +17,94 @@ import subprocess
# An error may occur if it is already patched, or meets some error.
# FIXME: Needs to be FIXED in the future.
def try_patch_skia():
- skia_dir = os.path.join(config.android_home, config.bench_dict['Skia'])
- # You may want to change the file based on aosp or internal
- if config.android_type == 'internal':
- print('No need to patch skia for internal repo.')
- return
- elif config.android_type == 'aosp':
- skia_patch = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), 'skia_aosp.diff')
- else:
- raise ValueError('Adnroid source type should be either aosp or internal.')
- # FIXME: A quick hack, need to handle errors and check whether has been
- # applied in the future.
- try:
- subprocess.check_call(['git', '-C', skia_dir, 'apply', skia_patch])
- print('Skia patched successfully!')
- except subprocess.CalledProcessError:
- print('Skia patch not applied, error or already patched.')
+ skia_dir = os.path.join(config.android_home, config.bench_dict['Skia'])
+ # You may want to change the file based on aosp or internal
+ if config.android_type == 'internal':
+ print('No need to patch skia for internal repo.')
+ return
+ elif config.android_type == 'aosp':
+ skia_patch = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), 'skia_aosp.diff')
+ else:
+ raise ValueError('Android source type should be either aosp or '
+ 'internal.')
+ # FIXME: A quick hack, need to handle errors and check whether has been
+ # applied in the future.
+ try:
+ subprocess.check_call(['git', '-C', skia_dir, 'apply', skia_patch])
+ print('Skia patched successfully!')
+ except subprocess.CalledProcessError:
+ print('Skia patch not applied, error or already patched.')
def try_patch_autotest():
- # Patch autotest, which includes all the testcases on device, setting device,
- # and running the benchmarks
- autotest_dir = os.path.join(config.android_home, config.autotest_dir)
- autotest_patch = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), 'autotest.diff')
- dex2oat_dir = os.path.join(autotest_dir, 'server/site_tests/android_Dex2oat')
- panorama_dir = os.path.join(autotest_dir,
- 'server/site_tests/android_Panorama')
- # FIXME: A quick hack, need to handle errors and check whether has been
- # applied in the future.
- try:
- subprocess.check_call(['git', '-C', autotest_dir, 'apply', autotest_patch])
- subprocess.check_call(['cp', '-rf', 'dex2oat_input', dex2oat_dir])
- subprocess.check_call(['cp', '-rf', 'panorama_input', panorama_dir])
- print('Autotest patched successfully!')
- except subprocess.CalledProcessError:
- print('Autotest patch not applied, error or already patched.')
+ # Patch autotest, which includes all the testcases on device,
+ # setting device, and running the benchmarks
+ autotest_dir = os.path.join(config.android_home, config.autotest_dir)
+ autotest_patch = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), 'autotest.diff')
+ dex2oat_dir = os.path.join(autotest_dir,
+ 'server/site_tests/android_Dex2oat')
+ panorama_dir = os.path.join(autotest_dir,
+ 'server/site_tests/android_Panorama')
+ # FIXME: A quick hack, need to handle errors and check whether has been
+ # applied in the future.
+ try:
+ subprocess.check_call(['git', '-C', autotest_dir,
+ 'apply', autotest_patch])
+ subprocess.check_call(['cp', '-rf', 'dex2oat_input', dex2oat_dir])
+ subprocess.check_call(['cp', '-rf', 'panorama_input', panorama_dir])
+ print('Autotest patched successfully!')
+ except subprocess.CalledProcessError:
+ print('Autotest patch not applied, error or already patched.')
def try_patch_panorama():
- panorama_dir = os.path.join(config.android_home,
- config.bench_dict['Panorama'])
- panorama_patch = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), 'panorama.diff')
- # FIXME: A quick hack, need to handle errors and check whether has been
- # applied in the future.
- try:
- subprocess.check_call(['mkdir', '-p', panorama_dir])
- subprocess.check_call(['git', '-C', panorama_dir, 'apply', panorama_patch])
- print('Panorama patched successfully!')
- except subprocess.CalledProcessError:
- print('Panorama patch not applied, error or already patched.')
+ panorama_dir = os.path.join(config.android_home,
+ config.bench_dict['Panorama'])
+ panorama_patch = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), 'panorama.diff')
+ # FIXME: A quick hack, need to handle errors and check whether has been
+ # applied in the future.
+ try:
+ subprocess.check_call(['mkdir', '-p', panorama_dir])
+ subprocess.check_call(['git', '-C', panorama_dir,
+ 'apply', panorama_patch])
+ print('Panorama patched successfully!')
+ except subprocess.CalledProcessError:
+ print('Panorama patch not applied, error or already patched.')
def try_patch_synthmark():
- synthmark_dir = '/tmp/devrel/tools/synthmark'
- # FIXME: A quick hack, need to handle errors and check whether has been
- # applied in the future.
- try:
- subprocess.check_call([
- 'bash', '-c', 'cd /tmp && '
- 'rm -rf devrel && '
- 'mkdir devrel && '
- 'cd devrel && '
- 'repo init -u sso://devrel/manifest && '
- 'repo sync tools/synthmark'
- ])
- synthmark_patch = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), 'synthmark.diff')
- subprocess.check_call(['git', '-C', synthmark_dir,
- 'apply', synthmark_patch])
+ synthmark_dir = '/tmp/devrel/tools/synthmark'
+ # FIXME: A quick hack, need to handle errors and check whether has been
+ # applied in the future.
+ try:
+ subprocess.check_call([
+ 'bash', '-c', 'cd /tmp && '
+ 'rm -rf devrel && '
+ 'mkdir devrel && '
+ 'cd devrel && '
+ 'repo init -u sso://devrel/manifest && '
+ 'repo sync tools/synthmark'
+ ])
+ synthmark_patch = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), 'synthmark.diff')
+ subprocess.check_call(['git', '-C', synthmark_dir,
+ 'apply', synthmark_patch])
- subprocess.check_call(['mv', '-f', synthmark_dir, config.android_home])
- subprocess.check_call(['rm', '-rf', '/tmp/devrel'])
- print('Synthmark patched successfully!')
- except subprocess.CalledProcessError:
- print('Synthmark patch not applied, error or already patched.')
+ subprocess.check_call(['mv', '-f', synthmark_dir, config.android_home])
+ subprocess.check_call(['rm', '-rf', '/tmp/devrel'])
+ print('Synthmark patched successfully!')
+ except subprocess.CalledProcessError:
+ print('Synthmark patch not applied, error or already patched.')
def main():
- try_patch_skia()
- try_patch_panorama()
- try_patch_autotest()
- try_patch_synthmark()
+ try_patch_skia()
+ try_patch_panorama()
+ try_patch_autotest()
+ try_patch_synthmark()
if __name__ == '__main__':
- main()
+ main()
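
The FIXME comments above leave a real conflict indistinguishable from an already-applied patch, because any non-zero exit from git apply is swallowed. A minimal sketch of one way to tell the two apart with a dry run, assuming the same git-apply workflow (the repository path below is illustrative; skia_aosp.diff is the patch shipped with the suite):

import subprocess

def patch_state(repo_dir, patch_file):
    """Classify patch_file as 'applied', 'applicable', or 'conflict'."""
    def applies_cleanly(extra_args):
        cmd = ['git', '-C', repo_dir, 'apply', '--check'] + extra_args + [patch_file]
        return subprocess.call(cmd) == 0
    if applies_cleanly(['--reverse']):
        # The reverse patch applies cleanly, so the patch is already in place.
        return 'applied'
    if applies_cleanly([]):
        # The dry run succeeds, so a real 'git apply' is safe.
        return 'applicable'
    return 'conflict'

print(patch_state('/path/to/aosp/external/skia', 'skia_aosp.diff'))
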
diff --git a/build_bench.py b/build_bench.py
index 44ad7a0d..5b65cb5b 100755
--- a/build_bench.py
+++ b/build_bench.py
@@ -22,207 +22,214 @@ logging.basicConfig(level=logging.INFO)
def _parse_arguments_internal(argv):
- parser = argparse.ArgumentParser(description='Build benchmarks with '
- 'specified toolchain settings')
+ parser = argparse.ArgumentParser(description='Build benchmarks with '
+ 'specified toolchain settings')
- parser.add_argument(
- '-b', '--bench', required=True, help='Select the benchmark to be built.')
+ parser.add_argument(
+ '-b',
+ '--bench',
+ required=True,
+ help='Select the benchmark to be built.')
- parser.add_argument(
- '-c',
- '--compiler_dir',
- metavar='DIR',
- help='Specify the path to the compiler bin '
- 'directory.')
+ parser.add_argument(
+ '-c',
+ '--compiler_dir',
+ metavar='DIR',
+ help='Specify the path to the compiler bin '
+ 'directory.')
- parser.add_argument(
- '-o', '--build_os', help='Specify the host OS to build benchmark.')
+ parser.add_argument(
+ '-o',
+ '--build_os',
+ help='Specify the host OS to build benchmark.')
- parser.add_argument(
- '-l',
- '--llvm_prebuilts_version',
- help='Specify the version of prebuilt LLVM.')
+ parser.add_argument(
+ '-l',
+ '--llvm_prebuilts_version',
+ help='Specify the version of prebuilt LLVM.')
- parser.add_argument(
- '-f',
- '--cflags',
- help='Specify the optimization cflags for '
- 'the toolchain.')
+ parser.add_argument(
+ '-f',
+ '--cflags',
+ help='Specify the optimization cflags for the toolchain.')
- parser.add_argument(
- '--ldflags', help='Specify linker flags for the toolchain.')
+ parser.add_argument(
+ '--ldflags',
+ help='Specify linker flags for the toolchain.')
- return parser.parse_args(argv)
+ return parser.parse_args(argv)
# Set flags for compiling benchmarks, by changing the local
# CFLAGS/LDFLAGS in the android makefile of each benchmark
def set_flags(bench, cflags, ldflags):
- if not cflags:
- logging.info('No CFLAGS specified, using default settings.')
- cflags = ''
- else:
- logging.info('Cflags setting to "%s"...', cflags)
+ if not cflags:
+ logging.info('No CFLAGS specified, using default settings.')
+ cflags = ''
+ else:
+ logging.info('Cflags setting to "%s"...', cflags)
- if not ldflags:
- logging.info('No LDFLAGS specifed, using default settings.')
- ldflags = ''
- else:
- logging.info('Ldflags setting to "%s"...', ldflags)
+ if not ldflags:
+ logging.info('No LDFLAGS specified, using default settings.')
+ ldflags = ''
+ else:
+ logging.info('Ldflags setting to "%s"...', ldflags)
- add_flags = config.bench_flags_dict[bench]
- add_flags(cflags, ldflags)
- logging.info('Flags set successfully!')
+ add_flags = config.bench_flags_dict[bench]
+ add_flags(cflags, ldflags)
+ logging.info('Flags set successfully!')
def set_build_os(build_os):
- # Set $BUILD_OS variable for android makefile
- if build_os:
- os.environ['BUILD_OS'] = build_os
- logging.info('BUILD_OS set to "%s"...', build_os)
- else:
- logging.info('No BUILD_OS specified, using linux as default...')
+ # Set $BUILD_OS variable for android makefile
+ if build_os:
+ os.environ['BUILD_OS'] = build_os
+ logging.info('BUILD_OS set to "%s"...', build_os)
+ else:
+ logging.info('No BUILD_OS specified, using linux as default...')
def set_llvm_prebuilts_version(llvm_prebuilts_version):
- # Set $LLVM_PREBUILTS_VERSION for android makefile
- if llvm_prebuilts_version:
- os.environ['LLVM_PREBUILTS_VERSION'] = llvm_prebuilts_version
- logging.info('LLVM_PREBUILTS_VERSION set to "%s"...',
- llvm_prebuilts_version)
- else:
- logging.info('No LLVM_PREBUILTS_VERSION specified, using default one...')
+ # Set $LLVM_PREBUILTS_VERSION for android makefile
+ if llvm_prebuilts_version:
+ os.environ['LLVM_PREBUILTS_VERSION'] = llvm_prebuilts_version
+ logging.info('LLVM_PREBUILTS_VERSION set to "%s"...',
+ llvm_prebuilts_version)
+ else:
+ logging.info('No LLVM_PREBUILTS_VERSION specified, '
+ 'using default one...')
def set_compiler(compiler):
- # If compiler_dir has been specified, copy the binaries to
- # a temporary location, set BUILD_OS and LLVM_PREBUILTS_VERSION
- # variables to the location
- if compiler:
- # Report error if path not exits
- if not os.path.isdir(compiler):
- logging.error('Error while setting compiler: '
- 'Directory %s does not exist!', compiler)
- raise OSError('Directory %s not exist.' % compiler)
-
- # Specify temporary directory for compiler
- tmp_dir = os.path.join(config.android_home,
- 'prebuilts/clang/host/linux-x86', 'clang-tmp')
+ # If compiler_dir has been specified, copy the binaries to
+ # a temporary location, set BUILD_OS and LLVM_PREBUILTS_VERSION
+ # variables to the location
+ if compiler:
+ # Report an error if the path does not exist
+ if not os.path.isdir(compiler):
+ logging.error('Error while setting compiler: '
+ 'Directory %s does not exist!', compiler)
+ raise OSError('Directory %s does not exist.' % compiler)
- compiler_content = os.path.join(compiler, '.')
+ # Specify temporary directory for compiler
+ tmp_dir = os.path.join(config.android_home,
+ 'prebuilts/clang/host/linux-x86', 'clang-tmp')
- # Copy compiler to new directory
- try:
- subprocess.check_call(['cp', '-rf', compiler_content, tmp_dir])
- except subprocess.CalledProcessError:
- logging.error('Error while copying the compiler to '
- 'temporary directory %s!', tmp_dir)
- raise
+ compiler_content = os.path.join(compiler, '.')
- # Set environment variable
- os.environ['LLVM_PREBUILTS_VERSION'] = 'clang-tmp'
+ # Copy compiler to new directory
+ try:
+ subprocess.check_call(['cp', '-rf', compiler_content, tmp_dir])
+ except subprocess.CalledProcessError:
+ logging.error('Error while copying the compiler to '
+ 'temporary directory %s!', tmp_dir)
+ raise
- logging.info('Prebuilt Compiler set as %s.', os.path.abspath(compiler))
+ # Set environment variable
+ os.environ['LLVM_PREBUILTS_VERSION'] = 'clang-tmp'
+
+ logging.info('Prebuilt Compiler set as %s.', os.path.abspath(compiler))
def set_compiler_env(bench, compiler, build_os, llvm_prebuilts_version, cflags,
ldflags):
- logging.info('Setting compiler options for benchmark...')
+ logging.info('Setting compiler options for benchmark...')
- # If no specific prebuilt compiler directory, use BUILD_OS and
- # LLVM_PREBUILTS_VERSION to set the compiler version.
- # Otherwise, use the new prebuilt compiler.
- if not compiler:
- set_build_os(build_os)
- set_llvm_prebuilts_version(llvm_prebuilts_version)
- else:
- set_compiler(compiler)
+ # If no specific prebuilt compiler directory, use BUILD_OS and
+ # LLVM_PREBUILTS_VERSION to set the compiler version.
+ # Otherwise, use the new prebuilt compiler.
+ if not compiler:
+ set_build_os(build_os)
+ set_llvm_prebuilts_version(llvm_prebuilts_version)
+ else:
+ set_compiler(compiler)
- set_flags(bench, cflags, ldflags)
+ set_flags(bench, cflags, ldflags)
- return 0
+ return 0
def remove_tmp_dir():
- tmp_dir = os.path.join(config.android_home, 'prebuilts/clang/host/linux-x86',
- 'clang-tmp')
+ tmp_dir = os.path.join(config.android_home,
+ 'prebuilts/clang/host/linux-x86',
+ 'clang-tmp')
- try:
- subprocess.check_call(['rm', '-r', tmp_dir])
- except subprocess.CalledProcessError:
- logging.error('Error while removing the temporary '
- 'compiler directory %s!', tmp_dir)
- raise
+ try:
+ subprocess.check_call(['rm', '-r', tmp_dir])
+ except subprocess.CalledProcessError:
+ logging.error('Error while removing the temporary '
+ 'compiler directory %s!', tmp_dir)
+ raise
# Recover the makefile/blueprint from our patch after building
def restore_makefile(bench):
- pwd = os.path.join(config.android_home, config.bench_dict[bench])
- mk_file = os.path.join(pwd, 'Android.mk')
- if not os.path.exists(mk_file):
- mk_file = os.path.join(pwd, 'Android.bp')
- subprocess.check_call(['mv', os.path.join(pwd, 'tmp_makefile'), mk_file])
+ pwd = os.path.join(config.android_home, config.bench_dict[bench])
+ mk_file = os.path.join(pwd, 'Android.mk')
+ if not os.path.exists(mk_file):
+ mk_file = os.path.join(pwd, 'Android.bp')
+ subprocess.check_call(['mv', os.path.join(pwd, 'tmp_makefile'), mk_file])
# Run script to build benchmark
def build_bench(bench, source_dir):
- logging.info('Start building benchmark...')
-
- raw_cmd = ('cd {android_home} '
- '&& source build/envsetup.sh '
- '&& lunch {product_combo} '
- '&& mmma {source_dir} -j48'.format(
- android_home=config.android_home,
- product_combo=config.product_combo,
- source_dir=source_dir))
-
- log_file = os.path.join(config.bench_suite_dir, 'build_log')
- with open(log_file, 'a') as logfile:
- log_head = 'Log for building benchmark: %s\n' % (bench)
- logfile.write(log_head)
- try:
- subprocess.check_call(
- ['bash', '-c', raw_cmd], stdout=logfile, stderr=logfile)
- except subprocess.CalledProcessError:
- logging.error('Error while running %s, please check '
- '%s for more info.', raw_cmd, log_file)
- restore_makefile(bench)
- raise
-
- logging.info('Logs for building benchmark %s are written to %s.', bench,
- log_file)
- logging.info('Benchmark built successfully!')
+ logging.info('Start building benchmark...')
+
+ raw_cmd = ('cd {android_home} '
+ '&& source build/envsetup.sh '
+ '&& lunch {product_combo} '
+ '&& mmma {source_dir} -j48'.format(
+ android_home=config.android_home,
+ product_combo=config.product_combo,
+ source_dir=source_dir))
+
+ log_file = os.path.join(config.bench_suite_dir, 'build_log')
+ with open(log_file, 'a') as logfile:
+ log_head = 'Log for building benchmark: %s\n' % (bench)
+ logfile.write(log_head)
+ try:
+ subprocess.check_call(
+ ['bash', '-c', raw_cmd], stdout=logfile, stderr=logfile)
+ except subprocess.CalledProcessError:
+ logging.error('Error while running %s, please check '
+ '%s for more info.', raw_cmd, log_file)
+ restore_makefile(bench)
+ raise
+
+ logging.info('Logs for building benchmark %s are written to %s.',
+ bench, log_file)
+ logging.info('Benchmark built successfully!')
def main(argv):
- arguments = _parse_arguments_internal(argv)
+ arguments = _parse_arguments_internal(argv)
- bench = arguments.bench
- compiler = arguments.compiler_dir
- build_os = arguments.build_os
- llvm_version = arguments.llvm_prebuilts_version
- cflags = arguments.cflags
- ldflags = arguments.ldflags
+ bench = arguments.bench
+ compiler = arguments.compiler_dir
+ build_os = arguments.build_os
+ llvm_version = arguments.llvm_prebuilts_version
+ cflags = arguments.cflags
+ ldflags = arguments.ldflags
- try:
- source_dir = config.bench_dict[bench]
- except KeyError:
- logging.error('Please select one benchmark from the list below:\n\t' +
- '\n\t'.join(config.bench_list))
- raise
+ try:
+ source_dir = config.bench_dict[bench]
+ except KeyError:
+ logging.error('Please select one benchmark from the list below:\n\t' +
+ '\n\t'.join(config.bench_list))
+ raise
- set_compiler_env(bench, compiler, build_os, llvm_version, cflags, ldflags)
+ set_compiler_env(bench, compiler, build_os, llvm_version, cflags, ldflags)
- build_bench(bench, source_dir)
+ build_bench(bench, source_dir)
- # If flags has been set, remember to restore the makefile/blueprint to
- # original ones.
- restore_makefile(bench)
+ # If flags has been set, remember to restore the makefile/blueprint to
+ # original ones.
+ restore_makefile(bench)
- # If a tmp directory is used for compiler path, remove it after building.
- if compiler:
- remove_tmp_dir()
+ # If a tmp directory is used for compiler path, remove it after building.
+ if compiler:
+ remove_tmp_dir()
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
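
Taken together, build_bench.py prepares the toolchain environment, builds with lunch/mmma inside the Android tree, and restores the patched makefile afterwards. A hedged example of driving it directly, mirroring the command that run.py assembles (the benchmark name and flag values are placeholders):

import subprocess

build_cmd = [
    './build_bench.py',
    '-b=Skia',                 # a key of config.bench_dict
    '-f=-O3',                  # local CFLAGS injected into the Android makefile
    '--ldflags=-fuse-ld=lld',  # local LDFLAGS injected the same way
]
# A failed build raises CalledProcessError; the build output itself lands in
# <bench_suite_dir>/build_log, as build_bench() above shows.
subprocess.check_call(build_cmd)
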
diff --git a/config.py b/config.py
index e793cfa7..00160a85 100644
--- a/config.py
+++ b/config.py
@@ -29,13 +29,13 @@ env_config = ConfigParser.ConfigParser(allow_no_value=True)
env_config.read('env_setting')
def get_suite_env(name, path=False):
- variable = env_config.get('Suite_Environment', name)
- if variable:
- if path and not os.path.isdir(variable):
- raise ValueError('The path of %s does not exist.' % name)
- return variable
- else:
- raise ValueError('Please specify %s in env_setting' % name)
+ variable = env_config.get('Suite_Environment', name)
+ if variable:
+ if path and not os.path.isdir(variable):
+ raise ValueError('The path of %s does not exist.' % name)
+ return variable
+ else:
+ raise ValueError('Please specify %s in env_setting' % name)
# Android source code type: internal or aosp
android_type = get_suite_env('android_type')
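
get_suite_env() reads an INI-style env_setting file from the suite's working directory. A minimal sketch of the expected layout; android_type is the only key visible in this hunk, and android_home is a hypothetical example of a path-valued entry that would be checked with path=True:

try:
    from ConfigParser import ConfigParser   # Python 2, as in config.py
except ImportError:
    from configparser import ConfigParser   # Python 3

# Throwaway demo file; config.py reads a file named 'env_setting' the same way.
with open('env_setting', 'w') as f:
    f.write('[Suite_Environment]\n'
            'android_type = aosp\n'
            'android_home = /path/to/android/source\n')

cfg = ConfigParser(allow_no_value=True)
cfg.read('env_setting')
print(cfg.get('Suite_Environment', 'android_type'))   # -> aosp
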
diff --git a/discard_patches.py b/discard_patches.py
index 4dd99ead..a6383f6b 100755
--- a/discard_patches.py
+++ b/discard_patches.py
@@ -13,53 +13,53 @@ import subprocess
def discard_git(path):
- try:
- subprocess.check_call(['git', '-C', path, 'reset'])
- subprocess.check_call(['git', '-C', path, 'clean', '-fdx'])
- subprocess.check_call(['git', '-C', path, 'stash'])
- print('Patch in %s removed successfully!' % path)
- except subprocess.CalledProcessError:
- print('Error while removing patch in %s' % path)
+ try:
+ subprocess.check_call(['git', '-C', path, 'reset'])
+ subprocess.check_call(['git', '-C', path, 'clean', '-fdx'])
+ subprocess.check_call(['git', '-C', path, 'stash'])
+ print('Patch in %s removed successfully!' % path)
+ except subprocess.CalledProcessError:
+ print('Error while removing patch in %s' % path)
def dispatch_skia():
- skia_dir = os.path.join(config.android_home, config.bench_dict['Skia'])
- discard_git(skia_dir)
+ skia_dir = os.path.join(config.android_home, config.bench_dict['Skia'])
+ discard_git(skia_dir)
def dispatch_autotest():
- autotest_dir = os.path.join(config.android_home, config.autotest_dir)
- discard_git(autotest_dir)
+ autotest_dir = os.path.join(config.android_home, config.autotest_dir)
+ discard_git(autotest_dir)
def dispatch_panorama():
- panorama_dir = os.path.join(config.android_home,
- config.bench_dict['Panorama'])
- discard_git(panorama_dir)
- try:
- subprocess.check_call(['rm', '-rf', panorama_dir])
- print('Panorama benchmark directory deleted successfully!')
- except subprocess.CalledProcessError:
- print('Error deleting Panorama benchmark directory')
+ panorama_dir = os.path.join(config.android_home,
+ config.bench_dict['Panorama'])
+ discard_git(panorama_dir)
+ try:
+ subprocess.check_call(['rm', '-rf', panorama_dir])
+ print('Panorama benchmark directory deleted successfully!')
+ except subprocess.CalledProcessError:
+ print('Error deleting Panorama benchmark directory')
def dispatch_synthmark():
- synthmark_dir = 'synthmark'
- try:
- subprocess.check_call(
- ['rm', '-rf',
- os.path.join(config.android_home, synthmark_dir)])
- print('Synthmark patch removed successfully!')
- except subprocess.CalledProcessError:
- print('Synthmark is not removed. Error occurred.')
+ synthmark_dir = 'synthmark'
+ try:
+ subprocess.check_call(
+ ['rm', '-rf',
+ os.path.join(config.android_home, synthmark_dir)])
+ print('Synthmark patch removed successfully!')
+ except subprocess.CalledProcessError:
+ print('Synthmark is not removed. Error occurred.')
def main():
- dispatch_skia()
- dispatch_autotest()
- dispatch_panorama()
- dispatch_synthmark()
+ dispatch_skia()
+ dispatch_autotest()
+ dispatch_panorama()
+ dispatch_synthmark()
if __name__ == '__main__':
- main()
+ main()
diff --git a/fix_json.py b/fix_json.py
index cf94dd65..4e54d313 100755
--- a/fix_json.py
+++ b/fix_json.py
@@ -24,45 +24,45 @@ logging.basicConfig(level=logging.INFO)
def _parse_arguments_internal(argv):
- parser = argparse.ArgumentParser(description='Convert result to JSON'
- 'format')
- parser.add_argument(
- '-b', '--bench', help='Generate JSON format file for which benchmark.')
- return parser.parse_args(argv)
+ parser = argparse.ArgumentParser(description='Convert result to JSON'
+ 'format')
+ parser.add_argument(
+ '-b', '--bench', help='Generate JSON format file for which benchmark.')
+ return parser.parse_args(argv)
def fix_json(bench):
- # Set environment variable for crosperf
- os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)
+ # Set environment variable for crosperf
+ os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)
- logging.info('Generating Crosperf Report...')
- json_path = os.path.join(config.bench_suite_dir, bench + '_refined')
- crosperf_cmd = [
- os.path.join(config.toolchain_utils, 'generate_report.py'), '--json',
- '-i=' + os.path.join(config.bench_suite_dir, bench + '.json'),
- '-o=' + json_path, '-f'
- ]
+ logging.info('Generating Crosperf Report...')
+ json_path = os.path.join(config.bench_suite_dir, bench + '_refined')
+ crosperf_cmd = [
+ os.path.join(config.toolchain_utils, 'generate_report.py'), '--json',
+ '-i=' + os.path.join(config.bench_suite_dir, bench + '.json'),
+ '-o=' + json_path, '-f'
+ ]
- # Run crosperf generate_report.py
- logging.info('Command: %s', crosperf_cmd)
- subprocess.call(crosperf_cmd)
+ # Run crosperf generate_report.py
+ logging.info('Command: %s', crosperf_cmd)
+ subprocess.call(crosperf_cmd)
- json_path += '.json'
- with open(json_path) as fout:
- objs = json.load(fout)
- for obj in objs:
- obj['branch_name'] = 'aosp/master'
- obj['build_id'] = 0
- with open(json_path, 'w') as fout:
- json.dump(objs, fout)
+ json_path += '.json'
+ with open(json_path) as fout:
+ objs = json.load(fout)
+ for obj in objs:
+ obj['branch_name'] = 'aosp/master'
+ obj['build_id'] = 0
+ with open(json_path, 'w') as fout:
+ json.dump(objs, fout)
- logging.info('JSON file fixed successfully!')
+ logging.info('JSON file fixed successfully!')
def main(argv):
- arguments = _parse_arguments_internal(argv)
+ arguments = _parse_arguments_internal(argv)
- bench = arguments.bench
+ bench = arguments.bench
- fix_json(bench)
+ fix_json(bench)
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
diff --git a/fix_skia_results.py b/fix_skia_results.py
index 6eec6ccf..c7ccc873 100755
--- a/fix_skia_results.py
+++ b/fix_skia_results.py
@@ -31,114 +31,115 @@ _RESULT_RENAMES = {
def _GetFamiliarName(name):
- r = _RESULT_RENAMES[name]
- return r if r else name
+ r = _RESULT_RENAMES[name]
+ return r if r else name
def _IsResultInteresting(name):
- return name in _RESULT_RENAMES
+ return name in _RESULT_RENAMES
def _GetTimeMultiplier(label_name):
- """Given a time (in milliseconds), normalize it to what label_name expects.
+ """Given a time (in milliseconds), normalize it to what label_name expects.
- "What label_name expects" meaning "we pattern match against the last few
- non-space chars in label_name."
+ "What label_name expects" meaning "we pattern match against the last few
+ non-space chars in label_name."
- This expects the time unit to be separated from anything else by '_'.
- """
- ms_mul = 1000 * 1000.
- endings = [('_ns', 1), ('_us', 1000), ('_ms', ms_mul), ('_s', ms_mul * 1000)]
- for end, mul in endings:
- if label_name.endswith(end):
- return ms_mul / mul
- raise ValueError('Unknown ending in "%s"; expecting one of %s' %
- (label_name, [end for end, _ in endings]))
+ This expects the time unit to be separated from anything else by '_'.
+ """
+ ms_mul = 1000 * 1000.
+ endings = [('_ns', 1), ('_us', 1000),
+ ('_ms', ms_mul), ('_s', ms_mul * 1000)]
+ for end, mul in endings:
+ if label_name.endswith(end):
+ return ms_mul / mul
+ raise ValueError('Unknown ending in "%s"; expecting one of %s' %
+ (label_name, [end for end, _ in endings]))
def _GetTimeDenom(ms):
- """Given a list of times (in milliseconds), find a sane time unit for them.
+ """Given a list of times (in milliseconds), find a sane time unit for them.
- Returns the unit name, and `ms` normalized to that time unit.
+ Returns the unit name, and `ms` normalized to that time unit.
- >>> _GetTimeDenom([1, 2, 3])
- ('ms', [1.0, 2.0, 3.0])
- >>> _GetTimeDenom([.1, .2, .3])
- ('us', [100.0, 200.0, 300.0])
- """
+ >>> _GetTimeDenom([1, 2, 3])
+ ('ms', [1.0, 2.0, 3.0])
+ >>> _GetTimeDenom([.1, .2, .3])
+ ('us', [100.0, 200.0, 300.0])
+ """
- ms_mul = 1000 * 1000
- units = [('us', 1000), ('ms', ms_mul), ('s', ms_mul * 1000)]
- for name, mul in reversed(units):
- normalized = [float(t) * ms_mul / mul for t in ms]
- average = sum(normalized) / len(normalized)
- if all(n > 0.1 for n in normalized) and average >= 1:
- return name, normalized
+ ms_mul = 1000 * 1000
+ units = [('us', 1000), ('ms', ms_mul), ('s', ms_mul * 1000)]
+ for name, mul in reversed(units):
+ normalized = [float(t) * ms_mul / mul for t in ms]
+ average = sum(normalized) / len(normalized)
+ if all(n > 0.1 for n in normalized) and average >= 1:
+ return name, normalized
- normalized = [float(t) * ms_mul for t in ms]
- return 'ns', normalized
+ normalized = [float(t) * ms_mul for t in ms]
+ return 'ns', normalized
def _TransformBenchmarks(raw_benchmarks):
- # We get {"results": {"bench_name": Results}}
- # where
- # Results = {"config_name": {"samples": [float], etc.}}
- #
- # We want {"data": {"skia": [[BenchmarkData]]},
- # "platforms": ["platform1, ..."]}
- # where
- # BenchmarkData = {"bench_name": bench_samples[N], ..., "retval": 0}
- #
- # Note that retval is awkward -- crosperf's JSON reporter reports the result
- # as a failure if it's not there. Everything else treats it like a
- # statistic...
- benchmarks = raw_benchmarks['results']
- results = []
- for bench_name, bench_result in benchmarks.iteritems():
- try:
- for cfg_name, keyvals in bench_result.iteritems():
- # Some benchmarks won't have timing data (either it won't exist at all,
- # or it'll be empty); skip them.
- samples = keyvals.get('samples')
- if not samples:
- continue
-
- bench_name = '%s_%s' % (bench_name, cfg_name)
- if not _IsResultInteresting(bench_name):
- continue
-
- friendly_name = _GetFamiliarName(bench_name)
- if len(results) < len(samples):
- results.extend({
- 'retval': 0
- } for _ in xrange(len(samples) - len(results)))
-
- time_mul = _GetTimeMultiplier(friendly_name)
- for sample, app in itertools.izip(samples, results):
- assert friendly_name not in app
- app[friendly_name] = sample * time_mul
- except (KeyError, ValueError) as e:
- logging.error('While converting "%s" (key: %s): %s',
- bench_result, bench_name, e.message)
- raise
-
- # Realistically, [results] should be multiple results, where each entry in the
- # list is the result for a different label. Because we only deal with one
- # label at the moment, we need to wrap it in its own list.
- return results
+ # We get {"results": {"bench_name": Results}}
+ # where
+ # Results = {"config_name": {"samples": [float], etc.}}
+ #
+ # We want {"data": {"skia": [[BenchmarkData]]},
+ # "platforms": ["platform1, ..."]}
+ # where
+ # BenchmarkData = {"bench_name": bench_samples[N], ..., "retval": 0}
+ #
+ # Note that retval is awkward -- crosperf's JSON reporter reports the result
+ # as a failure if it's not there. Everything else treats it like a
+ # statistic...
+ benchmarks = raw_benchmarks['results']
+ results = []
+ for bench_name, bench_result in benchmarks.iteritems():
+ try:
+ for cfg_name, keyvals in bench_result.iteritems():
+ # Some benchmarks won't have timing data (either it won't exist
+ # at all, or it'll be empty); skip them.
+ samples = keyvals.get('samples')
+ if not samples:
+ continue
+
+ bench_name = '%s_%s' % (bench_name, cfg_name)
+ if not _IsResultInteresting(bench_name):
+ continue
+
+ friendly_name = _GetFamiliarName(bench_name)
+ if len(results) < len(samples):
+ results.extend({
+ 'retval': 0
+ } for _ in xrange(len(samples) - len(results)))
+
+ time_mul = _GetTimeMultiplier(friendly_name)
+ for sample, app in itertools.izip(samples, results):
+ assert friendly_name not in app
+ app[friendly_name] = sample * time_mul
+ except (KeyError, ValueError) as e:
+ logging.error('While converting "%s" (key: %s): %s',
+ bench_result, bench_name, e.message)
+ raise
+
+ # Realistically, [results] should be multiple results, where each entry in
+ # the list is the result for a different label. Because we only deal with
+ # one label at the moment, we need to wrap it in its own list.
+ return results
if __name__ == '__main__':
- def _GetUserFile(argv):
- if not argv or argv[0] == '-':
- return sys.stdin
- return open(argv[0])
+ def _GetUserFile(argv):
+ if not argv or argv[0] == '-':
+ return sys.stdin
+ return open(argv[0])
- def _Main():
- with _GetUserFile(sys.argv[1:]) as in_file:
- obj = json.load(in_file)
- output = _TransformBenchmarks(obj)
- json.dump(output, sys.stdout)
+ def _Main():
+ with _GetUserFile(sys.argv[1:]) as in_file:
+ obj = json.load(in_file)
+ output = _TransformBenchmarks(obj)
+ json.dump(output, sys.stdout)
- _Main()
+ _Main()
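
The two unit helpers above carry most of the conversion logic; a short worked demonstration of their behaviour (the label names and sample values are made up, and the last call simply reproduces the doctest in _GetTimeDenom):

from fix_skia_results import _GetTimeMultiplier, _GetTimeDenom

# A label's suffix selects the unit that a millisecond sample is scaled to.
print(_GetTimeMultiplier('frame_time_us'))   # -> 1000.0 (1.5 ms becomes 1500.0)
print(_GetTimeMultiplier('total_time_s'))    # -> 0.001  (1500 ms becomes 1.5)

# _GetTimeDenom picks the largest unit whose normalized average stays >= 1.
print(_GetTimeDenom([.1, .2, .3]))           # -> ('us', [100.0, 200.0, 300.0])
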
diff --git a/gen_json.py b/gen_json.py
index ad617ff4..812afd8d 100755
--- a/gen_json.py
+++ b/gen_json.py
@@ -22,75 +22,75 @@ logging.basicConfig(level=logging.INFO)
def _parse_arguments_internal(argv):
- parser = argparse.ArgumentParser(description='Convert result to JSON'
- 'format')
+ parser = argparse.ArgumentParser(description='Convert result to JSON'
+ 'format')
- parser.add_argument(
- '-b', '--bench', help='Generate JSON format file for which benchmark.')
+ parser.add_argument(
+ '-b', '--bench', help='Generate JSON format file for which benchmark.')
- parser.add_argument(
- '-i', '--input', help='Specify the input result file name.')
+ parser.add_argument(
+ '-i', '--input', help='Specify the input result file name.')
- parser.add_argument(
- '-o', '--output', help='Specify the output JSON format result file')
+ parser.add_argument(
+ '-o', '--output', help='Specify the output JSON format result file')
- parser.add_argument(
- '-p',
- '--platform',
- help='Indicate the platform(experiment or device) name '
- 'to be shown in JSON')
+ parser.add_argument(
+ '-p',
+ '--platform',
+ help='Indicate the platform (experiment or device) name '
+ 'to be shown in JSON')
- parser.add_argument(
- '--iterations',
- type=int,
- help='How many iterations does the result include.')
- return parser.parse_args(argv)
+ parser.add_argument(
+ '--iterations',
+ type=int,
+ help='How many iterations does the result include.')
+ return parser.parse_args(argv)
# Collect data and generate JSON {} tuple from benchmark result
def collect_data(infile, bench, it):
- result_dict = {}
- with open(infile + str(it)) as fin:
- if bench not in config.bench_parser_dict:
- logging.error('Please input the correct benchmark name.')
- raise ValueError('Wrong benchmark name: %s' % bench)
- parse = config.bench_parser_dict[bench]
- result_dict = parse(bench, fin)
- return result_dict
+ result_dict = {}
+ with open(infile + str(it)) as fin:
+ if bench not in config.bench_parser_dict:
+ logging.error('Please input the correct benchmark name.')
+ raise ValueError('Wrong benchmark name: %s' % bench)
+ parse = config.bench_parser_dict[bench]
+ result_dict = parse(bench, fin)
+ return result_dict
# If there is no original output file, create a new one and init it.
def create_outfile(outfile, bench):
- with open(outfile, 'w') as fout:
- obj_null = {'data': {bench.lower(): []}, 'platforms': []}
- json.dump(obj_null, fout)
+ with open(outfile, 'w') as fout:
+ obj_null = {'data': {bench.lower(): []}, 'platforms': []}
+ json.dump(obj_null, fout)
# Seek the original output file and try to add new result into it.
def get_outfile(outfile, bench):
- try:
- return open(outfile)
- except IOError:
- create_outfile(outfile, bench)
- return open(outfile)
+ try:
+ return open(outfile)
+ except IOError:
+ create_outfile(outfile, bench)
+ return open(outfile)
def main(argv):
- arguments = _parse_arguments_internal(argv)
+ arguments = _parse_arguments_internal(argv)
- bench = arguments.bench
- infile = arguments.input
- outfile = arguments.output
- platform = arguments.platform
- iteration = arguments.iterations
+ bench = arguments.bench
+ infile = arguments.input
+ outfile = arguments.output
+ platform = arguments.platform
+ iteration = arguments.iterations
- result = []
- for i in xrange(iteration):
- result += collect_data(infile, bench, i)
+ result = []
+ for i in xrange(iteration):
+ result += collect_data(infile, bench, i)
- with get_outfile(outfile, bench) as fout:
- obj = json.load(fout)
- obj['platforms'].append(platform)
- obj['data'][bench.lower()].append(result)
- with open(outfile, 'w') as fout:
- json.dump(obj, fout)
+ with get_outfile(outfile, bench) as fout:
+ obj = json.load(fout)
+ obj['platforms'].append(platform)
+ obj['data'][bench.lower()].append(result)
+ with open(outfile, 'w') as fout:
+ json.dump(obj, fout)
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
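
For reference, the output file that gen_json.py accumulates ends up shaped like the sketch below after two runs of one benchmark; every value here is invented, and the per-iteration keys come from the parsers in parse_result.py:

import json

accumulated = {
    # one entry per gen_json.py invocation (one per toolchain setting/serial)
    'platforms': ['aosp_arm64-eng', 'aosp_arm64-eng'],
    'data': {
        # keyed by bench.lower(); each inner list holds one dict per iteration
        'hwui': [
            [{'total_time_s': 12.3, 'retval': 0}],
            [{'total_time_s': 11.8, 'retval': 0}],
        ],
    },
}
print(json.dumps(accumulated, indent=2))
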
diff --git a/parse_result.py b/parse_result.py
index 90b3c4d1..ba5ee802 100644
--- a/parse_result.py
+++ b/parse_result.py
@@ -9,106 +9,106 @@ from fix_skia_results import _TransformBenchmarks
import json
def normalize(bench, dict_list):
- bench_base = {
- 'Panorama': 1,
- 'Dex2oat': 1,
- 'Hwui': 10000,
- 'Skia': 1,
- 'Synthmark': 1,
- 'Binder': 0.001
- }
- result_dict = dict_list[0]
- for key in result_dict:
- result_dict[key] = result_dict[key] / bench_base[bench]
- return [result_dict]
+ bench_base = {
+ 'Panorama': 1,
+ 'Dex2oat': 1,
+ 'Hwui': 10000,
+ 'Skia': 1,
+ 'Synthmark': 1,
+ 'Binder': 0.001
+ }
+ result_dict = dict_list[0]
+ for key in result_dict:
+ result_dict[key] = result_dict[key] / bench_base[bench]
+ return [result_dict]
# Functions to parse benchmark result for data collection.
def parse_Panorama(bench, fin):
- result_dict = {}
- for line in fin:
- words = line.split()
- if 'elapsed' in words:
- #TODO: Need to restructure the embedded word counts.
- result_dict['total_time_s'] = float(words[3])
- result_dict['retval'] = 0
- return normalize(bench, [result_dict])
- raise ValueError('You passed the right type of thing, '
- 'but it didn\'t have the expected contents.')
+ result_dict = {}
+ for line in fin:
+ words = line.split()
+ if 'elapsed' in words:
+ #TODO: Need to restructure the embedded word counts.
+ result_dict['total_time_s'] = float(words[3])
+ result_dict['retval'] = 0
+ return normalize(bench, [result_dict])
+ raise ValueError('You passed the right type of thing, '
+ 'but it didn\'t have the expected contents.')
def parse_Synthmark(bench, fin):
- result_dict = {}
- accum = 0
- cnt = 0
- for line in fin:
- words = line.split()
- if 'normalized' in words:
- #TODO: Need to restructure the embedded word counts.
- accum += float(words[-1])
- cnt += 1
- if accum != 0:
- result_dict['total_voices'] = accum / cnt
- result_dict['retval'] = 0
- return normalize(bench, [result_dict])
- raise ValueError('You passed the right type of thing, '
- 'but it didn\'t have the expected contents.')
+ result_dict = {}
+ accum = 0
+ cnt = 0
+ for line in fin:
+ words = line.split()
+ if 'normalized' in words:
+ #TODO: Need to restructure the embedded word counts.
+ accum += float(words[-1])
+ cnt += 1
+ if accum != 0:
+ result_dict['total_voices'] = accum / cnt
+ result_dict['retval'] = 0
+ return normalize(bench, [result_dict])
+ raise ValueError('You passed the right type of thing, '
+ 'but it didn\'t have the expected contents.')
def parse_Binder(bench, fin):
- result_dict = {}
- accum = 0
- cnt = 0
- for line in fin:
- words = line.split()
- for word in words:
- if 'average' in word:
- #TODO: Need to restructure the embedded word counts.
- accum += float(word[8:-2])
- cnt += 1
- if accum != 0:
- result_dict['avg_time_ms'] = accum / cnt
- result_dict['retval'] = 0
- return normalize(bench, [result_dict])
- raise ValueError('You passed the right type of thing, '
- 'but it didn\'t have the expected contents.')
+ result_dict = {}
+ accum = 0
+ cnt = 0
+ for line in fin:
+ words = line.split()
+ for word in words:
+ if 'average' in word:
+ #TODO: Need to restructure the embedded word counts.
+ accum += float(word[8:-2])
+ cnt += 1
+ if accum != 0:
+ result_dict['avg_time_ms'] = accum / cnt
+ result_dict['retval'] = 0
+ return normalize(bench, [result_dict])
+ raise ValueError('You passed the right type of thing, '
+ 'but it didn\'t have the expected contents.')
def parse_Dex2oat(bench, fin):
- result_dict = {}
- cnt = 0
- for line in fin:
- words = line.split()
- if 'elapsed' in words:
- cnt += 1
- #TODO: Need to restructure the embedded word counts.
- if cnt == 1:
- # First 'elapsed' time is for microbench 'Chrome'
- result_dict['chrome_s'] = float(words[3])
- elif cnt == 2:
- # Second 'elapsed' time is for microbench 'Camera'
- result_dict['camera_s'] = float(words[3])
-
- result_dict['retval'] = 0
- # Two results found, return
- return normalize(bench, [result_dict])
- raise ValueError('You passed the right type of thing, '
- 'but it didn\'t have the expected contents.')
+ result_dict = {}
+ cnt = 0
+ for line in fin:
+ words = line.split()
+ if 'elapsed' in words:
+ cnt += 1
+ #TODO: Need to restructure the embedded word counts.
+ if cnt == 1:
+ # First 'elapsed' time is for microbench 'Chrome'
+ result_dict['chrome_s'] = float(words[3])
+ elif cnt == 2:
+ # Second 'elapsed' time is for microbench 'Camera'
+ result_dict['camera_s'] = float(words[3])
+
+ result_dict['retval'] = 0
+ # Two results found, return
+ return normalize(bench, [result_dict])
+ raise ValueError('You passed the right type of thing, '
+ 'but it didn\'t have the expected contents.')
def parse_Hwui(bench, fin):
- result_dict = {}
- for line in fin:
- words = line.split()
- if 'elapsed' in words:
- #TODO: Need to restructure the embedded word counts.
- result_dict['total_time_s'] = float(words[3])
- result_dict['retval'] = 0
- return normalize(bench, [result_dict])
- raise ValueError('You passed the right type of thing, '
- 'but it didn\'t have the expected contents.')
+ result_dict = {}
+ for line in fin:
+ words = line.split()
+ if 'elapsed' in words:
+ #TODO: Need to restructure the embedded word counts.
+ result_dict['total_time_s'] = float(words[3])
+ result_dict['retval'] = 0
+ return normalize(bench, [result_dict])
+ raise ValueError('You passed the right type of thing, '
+ 'but it didn\'t have the expected contents.')
def parse_Skia(bench, fin):
- obj = json.load(fin)
- return normalize(bench, _TransformBenchmarks(obj))
+ obj = json.load(fin)
+ return normalize(bench, _TransformBenchmarks(obj))
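
Each parser scans free-form test output for a keyword and picks a value out by word position. A worked example for parse_Panorama(); the log lines are invented purely to show the shape it expects ('elapsed' must appear as a word and the fourth word must be the time):

from parse_result import parse_Panorama

fake_log = [
    'Setting up panorama input...\n',
    'Total elapsed time: 25.70 seconds\n',   # words[3] == '25.70'
]
print(parse_Panorama('Panorama', fake_log))
# -> [{'total_time_s': 25.7, 'retval': 0}]
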
diff --git a/run.py b/run.py
index e74162b6..25c6ebeb 100755
--- a/run.py
+++ b/run.py
@@ -24,458 +24,466 @@ import sys
logging.basicConfig(level=logging.INFO)
def _parse_arguments(argv):
- parser = argparse.ArgumentParser(description='Build and run specific '
- 'benchamrk')
- parser.add_argument(
- '-b',
- '--bench',
- action='append',
- default=[],
- help='Select which benchmark to run')
-
- # Only one of compiler directory and llvm prebuilts version can be indicated
- # at the beginning, so set -c and -l into a exclusive group.
- group = parser.add_mutually_exclusive_group()
-
- # The toolchain setting arguments has action of 'append', so that users
- # could compare performance with several toolchain settings together.
- group.add_argument(
- '-c',
- '--compiler_dir',
- metavar='DIR',
- action='append',
- default=[],
- help='Specify path to the compiler\'s bin directory. '
- 'You shall give several paths, each with a -c, to '
- 'compare performance differences in '
- 'each compiler.')
-
- parser.add_argument(
- '-o',
- '--build_os',
- action='append',
- default=[],
- help='Specify the host OS to build the benchmark.')
-
- group.add_argument(
- '-l',
- '--llvm_prebuilts_version',
- action='append',
- default=[],
- help='Specify the version of prebuilt LLVM. When '
- 'specific prebuilt version of LLVM already '
- 'exists, no need to pass the path to compiler '
- 'directory.')
-
- parser.add_argument(
- '-f',
- '--cflags',
- action='append',
- default=[],
- help='Specify the cflags options for the toolchain. '
- 'Be sure to quote all the cflags with quotation '
- 'mark("") or use equal(=).')
- parser.add_argument(
- '--ldflags',
- action='append',
- default=[],
- help='Specify linker flags for the toolchain.')
-
- parser.add_argument(
- '-i',
- '--iterations',
- type=int,
- default=1,
- help='Specify how many iterations does the test '
- 'take.')
-
- # Arguments -s and -r are for connecting to DUT.
- parser.add_argument(
- '-s',
- '--serials',
- help='Comma separate list of device serials under '
- 'test.')
-
- parser.add_argument(
- '-r',
- '--remote',
- default='localhost',
- help='hostname[:port] if the ADB device is connected '
- 'to a remote machine. Ensure this workstation '
- 'is configured for passwordless ssh access as '
- 'users "root" or "adb"')
-
- # Arguments -frequency and -m are for device settings
- parser.add_argument(
- '--frequency',
- type=int,
- default=979200,
- help='Specify the CPU frequency of the device. The '
- 'unit is KHZ. The available value is defined in'
- 'cpufreq/scaling_available_frequency file in '
- 'device\'s each core directory. '
- 'The default value is 979200, which shows a '
- 'balance in noise and performance. Lower '
- 'frequency will slow down the performance but '
- 'reduce noise.')
-
- parser.add_argument(
- '-m',
- '--mode',
- default='little',
- help='User can specify whether \'little\' or \'big\' '
- 'mode to use. The default one is little mode. '
- 'The little mode runs on a single core of '
- 'Cortex-A53, while big mode runs on single core '
- 'of Cortex-A57.')
-
- # Configure file for benchmark test
- parser.add_argument(
- '-t',
- '--test',
- help='Specify the test settings with configuration '
- 'file.')
-
- # Whether to keep old json result or not
- parser.add_argument(
- '-k',
- '--keep',
- default='False',
- help='User can specify whether to keep the old json '
- 'results from last run. This can be useful if you '
- 'want to compare performance differences in two or '
- 'more different runs. Default is False(off).')
-
- return parser.parse_args(argv)
+ parser = argparse.ArgumentParser(description='Build and run specific '
+ 'benchmark')
+ parser.add_argument(
+ '-b',
+ '--bench',
+ action='append',
+ default=[],
+ help='Select which benchmark to run')
+
+ # Only one of compiler directory and llvm prebuilts version can be indicated
+ # at the beginning, so set -c and -l into an exclusive group.
+ group = parser.add_mutually_exclusive_group()
+
+ # The toolchain setting arguments has action of 'append', so that users
+ # could compare performance with several toolchain settings together.
+ group.add_argument(
+ '-c',
+ '--compiler_dir',
+ metavar='DIR',
+ action='append',
+ default=[],
+ help='Specify path to the compiler\'s bin directory. '
+ 'You shall give several paths, each with a -c, to '
+ 'compare performance differences in '
+ 'each compiler.')
+
+ parser.add_argument(
+ '-o',
+ '--build_os',
+ action='append',
+ default=[],
+ help='Specify the host OS to build the benchmark.')
+
+ group.add_argument(
+ '-l',
+ '--llvm_prebuilts_version',
+ action='append',
+ default=[],
+ help='Specify the version of prebuilt LLVM. When '
+ 'specific prebuilt version of LLVM already '
+ 'exists, no need to pass the path to compiler '
+ 'directory.')
+
+ parser.add_argument(
+ '-f',
+ '--cflags',
+ action='append',
+ default=[],
+ help='Specify the cflags options for the toolchain. '
+ 'Be sure to quote all the cflags with quotation '
+ 'mark("") or use equal(=).')
+ parser.add_argument(
+ '--ldflags',
+ action='append',
+ default=[],
+ help='Specify linker flags for the toolchain.')
+
+ parser.add_argument(
+ '-i',
+ '--iterations',
+ type=int,
+ default=1,
+ help='Specify how many iterations the test '
+ 'takes.')
+
+ # Arguments -s and -r are for connecting to DUT.
+ parser.add_argument(
+ '-s',
+ '--serials',
+ help='Comma-separated list of device serials under '
+ 'test.')
+
+ parser.add_argument(
+ '-r',
+ '--remote',
+ default='localhost',
+ help='hostname[:port] if the ADB device is connected '
+ 'to a remote machine. Ensure this workstation '
+ 'is configured for passwordless ssh access as '
+ 'users "root" or "adb"')
+
+ # Arguments -frequency and -m are for device settings
+ parser.add_argument(
+ '--frequency',
+ type=int,
+ default=979200,
+ help='Specify the CPU frequency of the device. The '
+ 'unit is kHz. The available values are defined in '
+ 'the cpufreq/scaling_available_frequency file in '
+ 'each core directory of the device. '
+ 'The default value is 979200, which shows a '
+ 'balance in noise and performance. Lower '
+ 'frequency will slow down the performance but '
+ 'reduce noise.')
+
+ parser.add_argument(
+ '-m',
+ '--mode',
+ default='little',
+ help='User can specify whether \'little\' or \'big\' '
+ 'mode to use. The default one is little mode. '
+ 'The little mode runs on a single core of '
+ 'Cortex-A53, while big mode runs on single core '
+ 'of Cortex-A57.')
+
+ # Configure file for benchmark test
+ parser.add_argument(
+ '-t',
+ '--test',
+ help='Specify the test settings with configuration '
+ 'file.')
+
+ # Whether to keep old json result or not
+ parser.add_argument(
+ '-k',
+ '--keep',
+ default='False',
+ help='User can specify whether to keep the old json '
+ 'results from last run. This can be useful if you '
+ 'want to compare performance differences in two or '
+ 'more different runs. Default is False(off).')
+
+ return parser.parse_args(argv)
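
A hypothetical top-level invocation built from these options (the paths, serial, and flag values are placeholders); two -c entries paired with two -f entries compare two toolchains in a single run:

import subprocess

subprocess.check_call([
    './run.py',
    '-b', 'Hwui',              # benchmark to build and run
    '-c', '/opt/clang-a/bin',  # first compiler bin directory
    '-c', '/opt/clang-b/bin',  # second compiler bin directory
    '-f=-O2',                  # cflags for the first setting (note the '=')
    '-f=-O3',                  # cflags for the second setting
    '-i', '3',                 # three iterations per setting
    '-s', 'DEVICE_SERIAL',     # DUT serial; omit to use the only attached device
])
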
# Clear old log files in bench suite directory
def clear_logs():
- logging.info('Removing old logfiles...')
- for f in ['build_log', 'device_log', 'test_log']:
- logfile = os.path.join(config.bench_suite_dir, f)
- try:
- os.remove(logfile)
- except OSError:
- logging.info('No logfile %s need to be removed. Ignored.', f)
- logging.info('Old logfiles been removed.')
+ logging.info('Removing old logfiles...')
+ for f in ['build_log', 'device_log', 'test_log']:
+ logfile = os.path.join(config.bench_suite_dir, f)
+ try:
+ os.remove(logfile)
+ except OSError:
+ logging.info('No logfile %s needs to be removed. Ignored.', f)
+ logging.info('Old logfiles have been removed.')
# Clear old json files in bench suite directory
def clear_results():
- logging.info('Clearing old json results...')
- for bench in config.bench_list:
- result = os.path.join(config.bench_suite_dir, bench + '.json')
- try:
- os.remove(result)
- except OSError:
- logging.info('no %s json file need to be removed. Ignored.', bench)
- logging.info('Old json results been removed.')
+ logging.info('Clearing old json results...')
+ for bench in config.bench_list:
+ result = os.path.join(config.bench_suite_dir, bench + '.json')
+ try:
+ os.remove(result)
+ except OSError:
+ logging.info('No %s json file needs to be removed. Ignored.', bench)
+ logging.info('Old json results have been removed.')
# Use subprocess.check_call to run other script, and put logs to files
def check_call_with_log(cmd, log_file):
- log_file = os.path.join(config.bench_suite_dir, log_file)
- with open(log_file, 'a') as logfile:
- log_header = 'Log for command: %s\n' % (cmd)
- logfile.write(log_header)
- try:
- subprocess.check_call(cmd, stdout=logfile)
- except subprocess.CalledProcessError:
- logging.error('Error running %s, please check %s for more info.', cmd,
- log_file)
- raise
- logging.info('Logs for %s are written to %s.', cmd, log_file)
+ log_file = os.path.join(config.bench_suite_dir, log_file)
+ with open(log_file, 'a') as logfile:
+ log_header = 'Log for command: %s\n' % (cmd)
+ logfile.write(log_header)
+ try:
+ subprocess.check_call(cmd, stdout=logfile)
+ except subprocess.CalledProcessError:
+ logging.error('Error running %s, please check %s for more info.',
+ cmd, log_file)
+ raise
+ logging.info('Logs for %s are written to %s.', cmd, log_file)
def set_device(serials, remote, frequency):
- setting_cmd = [
- os.path.join(
- os.path.join(config.android_home, config.autotest_dir),
- 'site_utils/set_device.py')
- ]
- setting_cmd.append('-r=' + remote)
- setting_cmd.append('-q=' + str(frequency))
-
- # Deal with serials.
- # If there is no serails specified, try to run test on the only device.
- # If specified, split the serials into a list and run test on each device.
- if serials:
- for serial in serials.split(','):
- setting_cmd.append('-s=' + serial)
- check_call_with_log(setting_cmd, 'device_log')
- setting_cmd.pop()
- else:
- check_call_with_log(setting_cmd, 'device_log')
+ setting_cmd = [
+ os.path.join(
+ os.path.join(config.android_home, config.autotest_dir),
+ 'site_utils/set_device.py')
+ ]
+ setting_cmd.append('-r=' + remote)
+ setting_cmd.append('-q=' + str(frequency))
- logging.info('CPU mode and frequency set successfully!')
+ # Deal with serials.
+ # If no serials are specified, try to run the test on the only device.
+ # If specified, split the serials into a list and run test on each device.
+ if serials:
+ for serial in serials.split(','):
+ setting_cmd.append('-s=' + serial)
+ check_call_with_log(setting_cmd, 'device_log')
+ setting_cmd.pop()
+ else:
+ check_call_with_log(setting_cmd, 'device_log')
+
+ logging.info('CPU mode and frequency set successfully!')
def log_ambiguous_args():
- logging.error('The count of arguments does not match!')
- raise ValueError('The count of arguments does not match.')
+ logging.error('The count of arguments does not match!')
+ raise ValueError('The count of arguments does not match.')
# Check if the count of building arguments are log_ambiguous or not. The
# number of -c/-l, -f, and -os should be either all 0s or all the same.
def check_count(compiler, llvm_version, build_os, cflags, ldflags):
- # Count will be set to 0 if no compiler or llvm_version specified.
- # Otherwise, one of these two args length should be 0 and count will be
- # the other one.
- count = max(len(compiler), len(llvm_version))
+ # Count will be set to 0 if no compiler or llvm_version specified.
+ # Otherwise, one of these two args length should be 0 and count will be
+ # the other one.
+ count = max(len(compiler), len(llvm_version))
- # Check if number of cflags is 0 or the same with before.
- if len(cflags) != 0:
- if count != 0 and len(cflags) != count:
- log_ambiguous_args()
- count = len(cflags)
+ # Check if number of cflags is 0 or the same with before.
+ if len(cflags) != 0:
+ if count != 0 and len(cflags) != count:
+ log_ambiguous_args()
+ count = len(cflags)
- if len(ldflags) != 0:
- if count != 0 and len(ldflags) != count:
- log_ambiguous_args()
- count = len(ldflags)
+ if len(ldflags) != 0:
+ if count != 0 and len(ldflags) != count:
+ log_ambiguous_args()
+ count = len(ldflags)
- if len(build_os) != 0:
- if count != 0 and len(build_os) != count:
- log_ambiguous_args()
- count = len(build_os)
+ if len(build_os) != 0:
+ if count != 0 and len(build_os) != count:
+ log_ambiguous_args()
+ count = len(build_os)
- # If no settings are passed, only run default once.
- return max(1, count)
+ # If no settings are passed, only run default once.
+ return max(1, count)
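
A worked example of the counting rule, assuming it is run from a configured checkout so that importing run succeeds; two compiler directories matched by two cflags entries give two build passes, while a third cflags entry would trip log_ambiguous_args():

from run import check_count

settings = check_count(
    ['/opt/clang-a/bin', '/opt/clang-b/bin'],  # compiler (-c), illustrative
    [],                                        # llvm_version (-l)
    [],                                        # build_os (-o)
    ['-O2', '-O3'],                            # cflags (-f)
    [])                                        # ldflags
print(settings)   # -> 2; with no settings at all it returns 1
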
# Build benchmark binary with toolchain settings
def build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
ldflags):
- # Build benchmark locally
- build_cmd = ['./build_bench.py', '-b=' + bench]
- if compiler:
- build_cmd.append('-c=' + compiler[setting_no])
- if llvm_version:
- build_cmd.append('-l=' + llvm_version[setting_no])
- if build_os:
- build_cmd.append('-o=' + build_os[setting_no])
- if cflags:
- build_cmd.append('-f=' + cflags[setting_no])
- if ldflags:
- build_cmd.append('--ldflags=' + ldflags[setting_no])
-
- logging.info('Building benchmark for toolchain setting No.%d...', setting_no)
- logging.info('Command: %s', build_cmd)
-
- try:
- subprocess.check_call(build_cmd)
- except:
- logging.error('Error while building benchmark!')
- raise
+ # Build benchmark locally
+ build_cmd = ['./build_bench.py', '-b=' + bench]
+ if compiler:
+ build_cmd.append('-c=' + compiler[setting_no])
+ if llvm_version:
+ build_cmd.append('-l=' + llvm_version[setting_no])
+ if build_os:
+ build_cmd.append('-o=' + build_os[setting_no])
+ if cflags:
+ build_cmd.append('-f=' + cflags[setting_no])
+ if ldflags:
+ build_cmd.append('--ldflags=' + ldflags[setting_no])
+
+ logging.info('Building benchmark for toolchain setting No.%d...',
+ setting_no)
+ logging.info('Command: %s', build_cmd)
+
+ try:
+ subprocess.check_call(build_cmd)
+ except:
+ logging.error('Error while building benchmark!')
+ raise
def run_and_collect_result(test_cmd, setting_no, i, bench, serial='default'):
- # Run autotest script for benchmark on DUT
- check_call_with_log(test_cmd, 'test_log')
+ # Run autotest script for benchmark on DUT
+ check_call_with_log(test_cmd, 'test_log')
- logging.info('Benchmark with setting No.%d, iter.%d finished testing on '
- 'device %s.', setting_no, i, serial)
+ logging.info('Benchmark with setting No.%d, iter.%d finished testing on '
+ 'device %s.', setting_no, i, serial)
- # Rename results from the bench_result generated in autotest
- bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
- if not os.path.exists(bench_result):
- logging.error('No result found at %s, '
- 'please check test_log for details.', bench_result)
- raise OSError('Result file %s not found.' % bench_result)
+ # Rename results from the bench_result generated in autotest
+ bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
+ if not os.path.exists(bench_result):
+ logging.error('No result found at %s, '
+ 'please check test_log for details.', bench_result)
+ raise OSError('Result file %s not found.' % bench_result)
- new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial, setting_no, i)
- new_bench_result_path = os.path.join(config.bench_suite_dir, new_bench_result)
- try:
- os.rename(bench_result, new_bench_result_path)
- except OSError:
- logging.error('Error while renaming raw result %s to %s', bench_result,
- new_bench_result_path)
- raise
+ new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial,
+ setting_no, i)
+ new_bench_result_path = os.path.join(config.bench_suite_dir,
+ new_bench_result)
+ try:
+ os.rename(bench_result, new_bench_result_path)
+ except OSError:
+ logging.error('Error while renaming raw result %s to %s',
+ bench_result, new_bench_result_path)
+ raise
- logging.info('Benchmark result saved at %s.', new_bench_result_path)
+ logging.info('Benchmark result saved at %s.', new_bench_result_path)
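
The rename above follows a fixed naming scheme; a small sketch of just the path construction (result_path_for and the example directory are illustrative):

    import os

    def result_path_for(bench_suite_dir, bench, serial, setting_no, i):
        # bench_result_{bench}_{serial}_{setting_no}_{iteration}
        name = 'bench_result_%s_%s_%d_%d' % (bench, serial, setting_no, i)
        return os.path.join(bench_suite_dir, name)

    # Iteration 2 of setting 0 on the default device:
    print(result_path_for('/tmp/bench_suite', 'Skia', 'default', 0, 2))
    # /tmp/bench_suite/bench_result_Skia_default_0_2
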
def test_bench(bench, setting_no, iterations, serials, remote, mode):
- logging.info('Start running benchmark on device...')
-
- # Run benchmark and tests on DUT
- for i in xrange(iterations):
- logging.info('Iteration No.%d:', i)
- test_cmd = [
- os.path.join(
- os.path.join(config.android_home, config.autotest_dir),
- 'site_utils/test_bench.py')
- ]
- test_cmd.append('-b=' + bench)
- test_cmd.append('-r=' + remote)
- test_cmd.append('-m=' + mode)
-
- # Deal with serials.
- # If there is no serails specified, try to run test on the only device.
- # If specified, split the serials into a list and run test on each device.
- if serials:
- for serial in serials.split(','):
- test_cmd.append('-s=' + serial)
-
- run_and_collect_result(test_cmd, setting_no, i, bench, serial)
- test_cmd.pop()
- else:
- run_and_collect_result(test_cmd, setting_no, i, bench)
+ logging.info('Start running benchmark on device...')
+
+ # Run benchmark and tests on DUT
+ for i in xrange(iterations):
+ logging.info('Iteration No.%d:', i)
+ test_cmd = [
+ os.path.join(
+ os.path.join(config.android_home, config.autotest_dir),
+ 'site_utils/test_bench.py')
+ ]
+ test_cmd.append('-b=' + bench)
+ test_cmd.append('-r=' + remote)
+ test_cmd.append('-m=' + mode)
+
+ # Deal with serials. If no serial is specified, run the test on the
+ # only attached device. If serials are specified, split them into a
+ # list and run the test on each device.
+ if serials:
+ for serial in serials.split(','):
+ test_cmd.append('-s=' + serial)
+
+ run_and_collect_result(test_cmd, setting_no, i, bench, serial)
+ test_cmd.pop()
+ else:
+ run_and_collect_result(test_cmd, setting_no, i, bench)
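
A sketch of the per-device command handling, assuming the same option spellings as test_bench() above; commands_per_device is illustrative and only builds the command lists instead of running them, the script path is abbreviated, and the serial/remote/mode values in the example are placeholders:

    def commands_per_device(bench, remote, mode, serials=None):
        # Base command mirrors the arguments test_bench() passes above.
        base = ['site_utils/test_bench.py', '-b=' + bench, '-r=' + remote,
                '-m=' + mode]
        if not serials:
            # No serial given: run once on the only attached device.
            return [base]
        # One command per device, each with its own '-s=<serial>' appended.
        return [base + ['-s=' + serial] for serial in serials.split(',')]

    for cmd in commands_per_device('Panorama', '192.168.1.10', 'short',
                                   'SERIAL_A,SERIAL_B'):
        print(cmd)
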
def gen_json(bench, setting_no, iterations, serials):
- bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
+ bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
- logging.info('Generating JSON file for Crosperf...')
+ logging.info('Generating JSON file for Crosperf...')
- if not serials:
- serials = 'default'
+ if not serials:
+ serials = 'default'
- for serial in serials.split(','):
+ for serial in serials.split(','):
+ # Use the device lunch combo as the platform/experiment name instead.
- #experiment = '_'.join([serial, str(setting_no)])
- experiment = config.product_combo
+ # Platform will be used as device lunch combo instead
+ #experiment = '_'.join([serial, str(setting_no)])
+ experiment = config.product_combo
- # Input format: bench_result_{bench}_{serial}_{setting_no}_
- input_file = '_'.join([bench_result, bench, serial, str(setting_no), ''])
- gen_json_cmd = [
- './gen_json.py', '--input=' + input_file,
- '--output=%s.json' % os.path.join(config.bench_suite_dir, bench),
- '--bench=' + bench, '--platform=' + experiment,
- '--iterations=' + str(iterations)
- ]
+ # Input format: bench_result_{bench}_{serial}_{setting_no}_
+ input_file = '_'.join([bench_result, bench,
+ serial, str(setting_no), ''])
+ gen_json_cmd = [
+ './gen_json.py', '--input=' + input_file,
+ '--output=%s.json' % os.path.join(config.bench_suite_dir, bench),
+ '--bench=' + bench, '--platform=' + experiment,
+ '--iterations=' + str(iterations)
+ ]
- logging.info('Command: %s', gen_json_cmd)
- if subprocess.call(gen_json_cmd):
- logging.error('Error while generating JSON file, please check raw data'
- 'of the results at %s.', input_file)
+ logging.info('Command: %s', gen_json_cmd)
+ if subprocess.call(gen_json_cmd):
+ logging.error('Error while generating JSON file, please check raw'
+ ' data of the results at %s.', input_file)
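
A sketch of the gen_json.py invocation that gen_json() assembles; the input prefix deliberately ends with an underscore, presumably so gen_json.py can append the per-iteration suffix produced by run_and_collect_result(). compose_gen_json_cmd and the example values are illustrative:

    import os

    def compose_gen_json_cmd(bench_suite_dir, bench, serial, setting_no,
                             iterations, platform):
        # Input prefix: bench_result_{bench}_{serial}_{setting_no}_
        bench_result = os.path.join(bench_suite_dir, 'bench_result')
        input_file = '_'.join([bench_result, bench, serial, str(setting_no), ''])
        return ['./gen_json.py',
                '--input=' + input_file,
                '--output=%s.json' % os.path.join(bench_suite_dir, bench),
                '--bench=' + bench,
                '--platform=' + platform,
                '--iterations=' + str(iterations)]

    print(compose_gen_json_cmd('/tmp/bench_suite', 'Binder', 'default', 0, 3,
                               'aosp_arm64-userdebug'))
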
def gen_crosperf(infile, outfile):
- # Set environment variable for crosperf
- os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)
+ # Set environment variable for crosperf
+ os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)
- logging.info('Generating Crosperf Report...')
- crosperf_cmd = [
- os.path.join(config.toolchain_utils, 'generate_report.py'),
- '-i=' + infile, '-o=' + outfile, '-f'
- ]
+ logging.info('Generating Crosperf Report...')
+ crosperf_cmd = [
+ os.path.join(config.toolchain_utils, 'generate_report.py'),
+ '-i=' + infile, '-o=' + outfile, '-f'
+ ]
- # Run crosperf generate_report.py
- logging.info('Command: %s', crosperf_cmd)
- subprocess.call(crosperf_cmd)
+ # Run crosperf generate_report.py
+ logging.info('Command: %s', crosperf_cmd)
+ subprocess.call(crosperf_cmd)
- logging.info('Report generated successfully!')
- logging.info('Report Location: ' + outfile + '.html at bench'
- 'suite directory.')
+ logging.info('Report generated successfully!')
+ logging.info('Report Location: ' + outfile + '.html in the bench '
+ 'suite directory.')
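
For completeness, a self-contained sketch of the report step; generate_report is a hypothetical wrapper that sets PYTHONPATH the same way gen_crosperf() does, and the example paths are placeholders:

    import os
    import subprocess

    def generate_report(toolchain_utils, infile, outfile):
        # PYTHONPATH points at the parent of toolchain_utils, matching
        # gen_crosperf() above; the flags are the same ones used there.
        env = dict(os.environ, PYTHONPATH=os.path.dirname(toolchain_utils))
        cmd = [os.path.join(toolchain_utils, 'generate_report.py'),
               '-i=' + infile, '-o=' + outfile, '-f']
        return subprocess.call(cmd, env=env)

    # e.g. generate_report('/src/toolchain-utils', '/tmp/Skia.json',
    #                      '/tmp/Skia_report')
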
def main(argv):
- # Set environment variable for the local loacation of benchmark suite.
- # This is for collecting testing results to benchmark suite directory.
- os.environ['BENCH_SUITE_DIR'] = config.bench_suite_dir
-
- # Set Android type, used for the difference part between aosp and internal.
- os.environ['ANDROID_TYPE'] = config.android_type
-
- # Set ANDROID_HOME for both building and testing.
- os.environ['ANDROID_HOME'] = config.android_home
-
- # Set environment variable for architecture, this will be used in
- # autotest.
- os.environ['PRODUCT'] = config.product
-
- arguments = _parse_arguments(argv)
-
- bench_list = arguments.bench
- if not bench_list:
- bench_list = config.bench_list
-
- compiler = arguments.compiler_dir
- build_os = arguments.build_os
- llvm_version = arguments.llvm_prebuilts_version
- cflags = arguments.cflags
- ldflags = arguments.ldflags
- iterations = arguments.iterations
- serials = arguments.serials
- remote = arguments.remote
- frequency = arguments.frequency
- mode = arguments.mode
- keep = arguments.keep
-
- # Clear old logs every time before run script
- clear_logs()
-
- if keep == 'False':
- clear_results()
-
- # Set test mode and frequency of CPU on the DUT
- set_device(serials, remote, frequency)
-
- test = arguments.test
- # if test configuration file has been given, use the build settings
- # in the configuration file and run the test.
- if test:
- test_config = ConfigParser.ConfigParser(allow_no_value=True)
- if not test_config.read(test):
- logging.error('Error while reading from building '
- 'configuration file %s.', test)
- raise RuntimeError('Error while reading configuration file %s.' % test)
-
- for setting_no, section in enumerate(test_config.sections()):
- bench = test_config.get(section, 'bench')
- compiler = [test_config.get(section, 'compiler')]
- build_os = [test_config.get(section, 'build_os')]
- llvm_version = [test_config.get(section, 'llvm_version')]
- cflags = [test_config.get(section, 'cflags')]
- ldflags = [test_config.get(section, 'ldflags')]
-
- # Set iterations from test_config file, if not exist, use the one from
- # command line.
- it = test_config.get(section, 'iterations')
- if not it:
- it = iterations
- it = int(it)
-
- # Build benchmark for each single test configuration
- build_bench(0, bench, compiler, llvm_version, build_os, cflags, ldflags)
-
- test_bench(bench, setting_no, it, serials, remote, mode)
-
- gen_json(bench, setting_no, it, serials)
-
- for bench in config.bench_list:
- infile = os.path.join(config.bench_suite_dir, bench + '.json')
- if os.path.exists(infile):
+ # Set environment variable for the local location of the benchmark suite.
+ # This is used to collect test results into the benchmark suite directory.
+ os.environ['BENCH_SUITE_DIR'] = config.bench_suite_dir
+
+ # Set Android type, used to handle the differences between aosp and internal.
+ os.environ['ANDROID_TYPE'] = config.android_type
+
+ # Set ANDROID_HOME for both building and testing.
+ os.environ['ANDROID_HOME'] = config.android_home
+
+ # Set environment variable for the architecture; this is used by
+ # autotest.
+ os.environ['PRODUCT'] = config.product
+
+ arguments = _parse_arguments(argv)
+
+ bench_list = arguments.bench
+ if not bench_list:
+ bench_list = config.bench_list
+
+ compiler = arguments.compiler_dir
+ build_os = arguments.build_os
+ llvm_version = arguments.llvm_prebuilts_version
+ cflags = arguments.cflags
+ ldflags = arguments.ldflags
+ iterations = arguments.iterations
+ serials = arguments.serials
+ remote = arguments.remote
+ frequency = arguments.frequency
+ mode = arguments.mode
+ keep = arguments.keep
+
+ # Clear old logs every time before running the script.
+ clear_logs()
+
+ if keep == 'False':
+ clear_results()
+
+ # Set test mode and frequency of CPU on the DUT
+ set_device(serials, remote, frequency)
+
+ test = arguments.test
+ # If a test configuration file has been given, use the build settings
+ # in the configuration file and run the test.
+ if test:
+ test_config = ConfigParser.ConfigParser(allow_no_value=True)
+ if not test_config.read(test):
+ logging.error('Error while reading from building '
+ 'configuration file %s.', test)
+ raise RuntimeError('Error while reading configuration file %s.'
+ % test)
+
+ for setting_no, section in enumerate(test_config.sections()):
+ bench = test_config.get(section, 'bench')
+ compiler = [test_config.get(section, 'compiler')]
+ build_os = [test_config.get(section, 'build_os')]
+ llvm_version = [test_config.get(section, 'llvm_version')]
+ cflags = [test_config.get(section, 'cflags')]
+ ldflags = [test_config.get(section, 'ldflags')]
+
+ # Set iterations from the test_config file; if not set there, use the
+ # value from the command line.
+ it = test_config.get(section, 'iterations')
+ if not it:
+ it = iterations
+ it = int(it)
+
+ # Build the benchmark for each test configuration.
+ build_bench(0, bench, compiler, llvm_version,
+ build_os, cflags, ldflags)
+
+ test_bench(bench, setting_no, it, serials, remote, mode)
+
+ gen_json(bench, setting_no, it, serials)
+
+ for bench in config.bench_list:
+ infile = os.path.join(config.bench_suite_dir, bench + '.json')
+ if os.path.exists(infile):
+ outfile = os.path.join(config.bench_suite_dir,
+ bench + '_report')
+ gen_crosperf(infile, outfile)
+
+ # Stop the script if only a config file was provided.
+ return 0
+
+ # If no configuration file is specified, continue running.
+ # Check whether the counts of the setting arguments are ambiguous.
+ setting_count = check_count(compiler, llvm_version, build_os,
+ cflags, ldflags)
+
+ for bench in bench_list:
+ logging.info('Start building and running benchmark: [%s]', bench)
+ # Run the script for each toolchain setting.
+ for setting_no in xrange(setting_count):
+ build_bench(setting_no, bench, compiler, llvm_version,
+ build_os, cflags, ldflags)
+
+ # Run autotest script for benchmark test on device
+ test_bench(bench, setting_no, iterations, serials, remote, mode)
+
+ gen_json(bench, setting_no, iterations, serials)
+
+ infile = os.path.join(config.bench_suite_dir, bench + '.json')
outfile = os.path.join(config.bench_suite_dir, bench + '_report')
gen_crosperf(infile, outfile)
- # Stop script if there is only config file provided
- return 0
-
- # If no configuration file specified, continue running.
- # Check if the count of the setting arguments are log_ambiguous.
- setting_count = check_count(compiler, llvm_version, build_os, cflags, ldflags)
-
- for bench in bench_list:
- logging.info('Start building and running benchmark: [%s]', bench)
- # Run script for each toolchain settings
- for setting_no in xrange(setting_count):
- build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
- ldflags)
-
- # Run autotest script for benchmark test on device
- test_bench(bench, setting_no, iterations, serials, remote, mode)
-
- gen_json(bench, setting_no, iterations, serials)
-
- infile = os.path.join(config.bench_suite_dir, bench + '.json')
- outfile = os.path.join(config.bench_suite_dir, bench + '_report')
- gen_crosperf(infile, outfile)
-
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
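
main() reads the optional test configuration file with ConfigParser, one section per build setting. A hypothetical example of such a file, exercised through ConfigParser: the section names and all values are made up, but the option names are the ones main() actually reads (bench, compiler, build_os, llvm_version, cflags, ldflags, iterations):

    try:
        import ConfigParser            # Python 2, as used by run.py
    except ImportError:
        import configparser as ConfigParser
    import os
    import tempfile

    EXAMPLE_TEST_CONFIG = """
    [hwui_O2]
    bench = Hwui
    compiler = /prebuilts/clang/host/linux-x86
    build_os = linux-x86
    llvm_version = clang-1234567
    cflags = -O2
    ldflags =
    iterations = 3

    [hwui_O3]
    bench = Hwui
    compiler = /prebuilts/clang/host/linux-x86
    build_os = linux-x86
    llvm_version = clang-1234567
    cflags = -O3
    ldflags =
    iterations = 3
    """

    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.config', delete=False)
    tmp.write(EXAMPLE_TEST_CONFIG)
    tmp.close()

    parser = ConfigParser.ConfigParser(allow_no_value=True)
    parser.read(tmp.name)
    for section in parser.sections():
        print('%s: cflags=%s iterations=%s' %
              (section, parser.get(section, 'cflags'),
               parser.get(section, 'iterations')))
    os.unlink(tmp.name)
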
diff --git a/set_flags.py b/set_flags.py
index a243c7cf..b6221d18 100644
--- a/set_flags.py
+++ b/set_flags.py
@@ -13,116 +13,117 @@ import subprocess
# Find the makefile/blueprint based on the benchmark, and make a copy of
# it for restoring later.
def backup_file(bench, file_type):
- mk_file = os.path.join(config.android_home, config.bench_dict[bench],
- 'Android.' + file_type)
- try:
- # Make a copy of the makefile/blueprint so that we can recover it after
- # building the benchmark
- subprocess.check_call([
- 'cp', mk_file,
- os.path.join(config.android_home, config.bench_dict[bench],
- 'tmp_makefile')
- ])
- except subprocess.CalledProcessError():
- raise OSError('Cannot backup Android.%s file for %s' % (file_type, bench))
+ mk_file = os.path.join(config.android_home, config.bench_dict[bench],
+ 'Android.' + file_type)
+ try:
+ # Make a copy of the makefile/blueprint so that we can recover it after
+ # building the benchmark
+ subprocess.check_call([
+ 'cp', mk_file,
+ os.path.join(config.android_home, config.bench_dict[bench],
+ 'tmp_makefile')
+ ])
+ except subprocess.CalledProcessError:
+ raise OSError('Cannot backup Android.%s file for %s' % (file_type,
+ bench))
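
An equivalent pure-Python sketch of the backup step using shutil instead of shelling out to cp; backup_makefile is a hypothetical name, and bench_dir stands in for config.bench_dict[bench]:

    import os
    import shutil

    def backup_makefile(android_home, bench_dir, file_type):
        # Copy Android.mk / Android.bp aside so it can be restored after building.
        src = os.path.join(android_home, bench_dir, 'Android.' + file_type)
        dst = os.path.join(android_home, bench_dir, 'tmp_makefile')
        try:
            shutil.copyfile(src, dst)
        except (IOError, OSError):
            raise OSError('Cannot backup Android.%s in %s' % (file_type, bench_dir))
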
# Insert lines to add LOCAL_CFLAGS/LOCAL_LDFLAGS to the benchmarks
# makefile/blueprint
def replace_flags(bench, android_type, file_type, cflags, ldflags):
- # Use format ["Flag1", "Flag2"] for bp file
- if file_type == 'bp':
- if cflags:
- cflags = '\", \"'.join(cflags.split())
- if ldflags:
- ldflags = '\", \"'.join(ldflags.split())
-
- if not cflags:
- cflags = ''
- else:
- cflags = '\"' + cflags + '\",'
- if not ldflags:
- ldflags = ''
- else:
- ldflags = '\"' + ldflags + '\",'
-
- # Two different diffs are used for aosp or internal android repo.
- if android_type == 'aosp':
- bench_diff = bench + '_flags_aosp.diff'
- else:
- bench_diff = bench + '_flags_internal.diff'
-
- # Replace CFLAGS_FOR_BENCH_SUITE marker with proper cflags
- output = ''
- with open(bench_diff) as f:
- for line in f:
- line = line.replace('CFLAGS_FOR_BENCH_SUITE', cflags)
- line = line.replace('LDFLAGS_FOR_BENCH_SUITE', ldflags)
- output += line
-
- with open('modified.diff', 'w') as f:
- f.write(output)
+ # Use format ["Flag1", "Flag2"] for bp file
+ if file_type == 'bp':
+ if cflags:
+ cflags = '\", \"'.join(cflags.split())
+ if ldflags:
+ ldflags = '\", \"'.join(ldflags.split())
+
+ if not cflags:
+ cflags = ''
+ else:
+ cflags = '\"' + cflags + '\",'
+ if not ldflags:
+ ldflags = ''
+ else:
+ ldflags = '\"' + ldflags + '\",'
+
+ # Two different diffs are used for the aosp and internal android repos.
+ if android_type == 'aosp':
+ bench_diff = bench + '_flags_aosp.diff'
+ else:
+ bench_diff = bench + '_flags_internal.diff'
+
+ # Replace CFLAGS_FOR_BENCH_SUITE marker with proper cflags
+ output = ''
+ with open(bench_diff) as f:
+ for line in f:
+ line = line.replace('CFLAGS_FOR_BENCH_SUITE', cflags)
+ line = line.replace('LDFLAGS_FOR_BENCH_SUITE', ldflags)
+ output += line
+
+ with open('modified.diff', 'w') as f:
+ f.write(output)
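
The bp branch above converts a space-separated flag string into a quoted, comma-terminated fragment for the blueprint; a tiny sketch of that conversion (to_bp_list is illustrative):

    def to_bp_list(flags):
        # '-O3 -flto'  ->  '"-O3", "-flto",'   (empty input stays empty)
        if not flags:
            return ''
        return '"' + '", "'.join(flags.split()) + '",'

    print(to_bp_list('-O3 -flto'))   # "-O3", "-flto",
    print(to_bp_list(None))          # (empty string)
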
def apply_patches(bench):
- bench_dir = os.path.join(config.android_home, config.bench_dict[bench])
- bench_diff = 'modified.diff'
- flags_patch = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), bench_diff)
- try:
- subprocess.check_call(['git', '-C', bench_dir, 'apply', flags_patch])
- except subprocess.CalledProcessError:
- raise OSError('Patch for adding flags for %s does not succeed.' % (bench))
+ bench_dir = os.path.join(config.android_home, config.bench_dict[bench])
+ bench_diff = 'modified.diff'
+ flags_patch = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), bench_diff)
+ try:
+ subprocess.check_call(['git', '-C', bench_dir, 'apply', flags_patch])
+ except subprocess.CalledProcessError:
+ raise OSError('Failed to apply the flags patch for %s.' % bench)
def replace_flags_in_dir(bench, cflags, ldflags):
- bench_mk = os.path.join(config.android_home, config.bench_dict[bench],
- 'Android.mk')
+ bench_mk = os.path.join(config.android_home, config.bench_dict[bench],
+ 'Android.mk')
- if not cflags:
- cflags = ''
- if not ldflags:
- ldflags = ''
+ if not cflags:
+ cflags = ''
+ if not ldflags:
+ ldflags = ''
- output = ''
- with open(bench_mk) as f:
- for line in f:
- line = line.replace('$(CFLAGS_FOR_BENCH_SUITE)', cflags)
- line = line.replace('$(LDFLAGS_FOR_BENCH_SUITE)', ldflags)
- output += line
- with open(bench_mk, 'w') as f:
- f.write(output)
+ output = ''
+ with open(bench_mk) as f:
+ for line in f:
+ line = line.replace('$(CFLAGS_FOR_BENCH_SUITE)', cflags)
+ line = line.replace('$(LDFLAGS_FOR_BENCH_SUITE)', ldflags)
+ output += line
+ with open(bench_mk, 'w') as f:
+ f.write(output)
def add_flags_Panorama(cflags, ldflags):
- backup_file('Panorama', 'mk')
- replace_flags_in_dir('Panorama', cflags, ldflags)
+ backup_file('Panorama', 'mk')
+ replace_flags_in_dir('Panorama', cflags, ldflags)
def add_flags_Synthmark(cflags, ldflags):
- backup_file('Synthmark', 'mk')
- replace_flags_in_dir('Synthmark', cflags, ldflags)
+ backup_file('Synthmark', 'mk')
+ replace_flags_in_dir('Synthmark', cflags, ldflags)
def add_flags_Skia(cflags, ldflags):
- backup_file('Skia', 'bp')
- replace_flags('Skia', config.android_type, 'bp', cflags, ldflags)
- apply_patches('Skia')
+ backup_file('Skia', 'bp')
+ replace_flags('Skia', config.android_type, 'bp', cflags, ldflags)
+ apply_patches('Skia')
def add_flags_Binder(cflags, ldflags):
- backup_file('Binder', 'bp')
- replace_flags('Binder', config.android_type, 'bp', cflags, ldflags)
- apply_patches('Binder')
+ backup_file('Binder', 'bp')
+ replace_flags('Binder', config.android_type, 'bp', cflags, ldflags)
+ apply_patches('Binder')
def add_flags_Hwui(cflags, ldflags):
- backup_file('Hwui', 'bp')
- replace_flags('Hwui', config.android_type, 'bp', cflags, ldflags)
- apply_patches('Hwui')
+ backup_file('Hwui', 'bp')
+ replace_flags('Hwui', config.android_type, 'bp', cflags, ldflags)
+ apply_patches('Hwui')
def add_flags_Dex2oat(cflags, ldflags):
- backup_file('Dex2oat', 'bp')
- replace_flags('Dex2oat', config.android_type, 'bp', cflags, ldflags)
- apply_patches('Dex2oat')
+ backup_file('Dex2oat', 'bp')
+ replace_flags('Dex2oat', config.android_type, 'bp', cflags, ldflags)
+ apply_patches('Dex2oat')
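
The add_flags_* helpers above differ only in benchmark name and makefile type; a hedged sketch of the same pattern expressed as a dispatch table. This is a refactoring sketch, not the script's API: add_flags and BENCH_FILE_TYPE are hypothetical, and android_type is taken as a parameter instead of config.android_type:

    # Assumes set_flags.py (this file) is importable from the current directory.
    from set_flags import (apply_patches, backup_file, replace_flags,
                           replace_flags_in_dir)

    # 'mk' benchmarks get in-place marker replacement; 'bp' benchmarks also
    # need the per-benchmark flags patch applied.
    BENCH_FILE_TYPE = {
        'Panorama': 'mk',
        'Synthmark': 'mk',
        'Skia': 'bp',
        'Binder': 'bp',
        'Hwui': 'bp',
        'Dex2oat': 'bp',
    }

    def add_flags(bench, cflags, ldflags, android_type):
        file_type = BENCH_FILE_TYPE[bench]
        backup_file(bench, file_type)
        if file_type == 'mk':
            replace_flags_in_dir(bench, cflags, ldflags)
        else:
            replace_flags(bench, android_type, file_type, cflags, ldflags)
            apply_patches(bench)

    # e.g. add_flags('Skia', '-O3', '', 'aosp')
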