path: root/android_bench_suite/run.py
blob: 55acb663a936a0d2692678332af35de5811fc424 (plain)
#!/usr/bin/env python2
#
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# pylint: disable=cros-logging-import

# This is the script to run the specified benchmarks with different toolchain
# settings. It covers building the benchmarks locally and running them on the
# DUT.

"""Main script to run the benchmark suite from building to testing."""
from __future__ import print_function

import argparse
import config
import ConfigParser
import logging
import os
import subprocess
import sys

logging.basicConfig(level=logging.INFO)

def _parse_arguments(argv):
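  """Parse the command line arguments for the benchmark run."""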
  parser = argparse.ArgumentParser(description='Build and run the specified '
                                   'benchmarks')
  parser.add_argument(
      '-b',
      '--bench',
      action='append',
      default=[],
      help='Select which benchmark to run')

  # Only one of compiler directory and llvm prebuilts version can be
  # specified, so put -c and -l into a mutually exclusive group.
  group = parser.add_mutually_exclusive_group()

  # The toolchain setting arguments use the 'append' action, so that users
  # can compare performance across several toolchain settings in one run.
  group.add_argument(
      '-c',
      '--compiler_dir',
      metavar='DIR',
      action='append',
      default=[],
      help='Specify the path to the compiler\'s bin directory. '
      'You may give several paths, each with its own -c, to '
      'compare performance differences between compilers.')

  parser.add_argument(
      '-o',
      '--build_os',
      action='append',
      default=[],
      help='Specify the host OS to build the benchmark.')

  group.add_argument(
      '-l',
      '--llvm_prebuilts_version',
      action='append',
      default=[],
      help='Specify the version of prebuilt LLVM. When a '
      'specific prebuilt version of LLVM already '
      'exists, there is no need to pass the compiler '
      'directory.')

  parser.add_argument(
      '-f',
      '--cflags',
      action='append',
      default=[],
      help='Specify the cflags for the toolchain. '
      'Be sure to quote all the cflags with quotation '
      'marks ("") or use an equals sign (=).')
  parser.add_argument(
      '--ldflags',
      action='append',
      default=[],
      help='Specify linker flags for the toolchain.')

  parser.add_argument(
      '-i',
      '--iterations',
      type=int,
      default=1,
      help='Specify how many iterations the test should '
      'run.')

  # Arguments -s and -r are for connecting to the DUT.
  parser.add_argument(
      '-s',
      '--serials',
      help='Comma-separated list of device serials under '
      'test.')

  parser.add_argument(
      '-r',
      '--remote',
      default='localhost',
      help='hostname[:port] if the ADB device is connected '
      'to a remote machine. Ensure this workstation '
      'is configured for passwordless ssh access as '
      'user "root" or "adb".')

  # Arguments --frequency and -m are for device settings.
  parser.add_argument(
      '--frequency',
      type=int,
      default=960000,
      help='Specify the CPU frequency of the device. The '
      'unit is kHz. The available values are listed in '
      'the cpufreq/scaling_available_frequencies file '
      'in each core\'s directory on the device. '
      'The default value is 960000, which balances '
      'noise and performance. A lower frequency slows '
      'down performance but reduces noise.')

  parser.add_argument(
      '-m',
      '--mode',
      default='little',
      help='Specify whether to use \'little\' or \'big\' '
      'mode. The default is little mode. '
      'Little mode runs on a single core of '
      'Cortex-A53, while big mode runs on a single '
      'core of Cortex-A57.')

  # Configuration file for the benchmark test
  parser.add_argument(
      '-t',
      '--test',
      help='Specify the test settings with a configuration '
      'file.')

  # Whether to keep old json results or not
  parser.add_argument(
      '-k',
      '--keep',
      default='False',
      help='Specify whether to keep the old json results '
      'from the last run. This can be useful if you '
      'want to compare performance differences across '
      'two or more runs. Default is False (off).')

  return parser.parse_args(argv)

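# Example invocations (a sketch only; the benchmark name, compiler path and
# serial below are hypothetical):
#   ./run.py -b Panorama -c /path/to/clang/bin -f='-O3' -i 3
#   ./run.py -t example_test.config -s 0123456789ABCDEF -r remote-host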

# Clear old log files in bench suite directory
def clear_logs():
  logging.info('Removing old logfiles...')
  for f in ['build_log', 'device_log', 'test_log']:
    logfile = os.path.join(config.bench_suite_dir, f)
    try:
      os.remove(logfile)
    except OSError:
      logging.info('No logfile %s needs to be removed. Ignored.', f)
  logging.info('Old logfiles have been removed.')


# Clear old json files in bench suite directory
def clear_results():
  logging.info('Clearing old json results...')
  for bench in config.bench_list:
    result = os.path.join(config.bench_suite_dir, bench + '.json')
    try:
      os.remove(result)
    except OSError:
      logging.info('No %s json file needs to be removed. Ignored.', bench)
  logging.info('Old json results have been removed.')


# Use subprocess.check_call to run another script, and write its logs to a file
def check_call_with_log(cmd, log_file):
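  """Run cmd with subprocess.check_call and append its output to log_file."""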
  log_file = os.path.join(config.bench_suite_dir, log_file)
  with open(log_file, 'a') as logfile:
    log_header = 'Log for command: %s\n' % (cmd)
    logfile.write(log_header)
    try:
      subprocess.check_call(cmd, stdout=logfile)
    except subprocess.CalledProcessError:
      logging.error('Error running %s, please check %s for more info.', cmd,
                    log_file)
      raise
  logging.info('Logs for %s are written to %s.', cmd, log_file)


def set_device(serials, remote, frequency):
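  """Set CPU mode and frequency on the DUT via autotest's set_device.py."""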
  setting_cmd = [
      os.path.join(
          os.path.join(config.android_home, config.autotest_dir),
          'site_utils/set_device.py')
  ]
  setting_cmd.append('-r=' + remote)
  setting_cmd.append('-q=' + str(frequency))

  # Deal with serials.
  # If no serials are specified, try to run the test on the only device.
  # If specified, split them into a list and run the test on each device.
  if serials:
    for serial in serials.split(','):
      setting_cmd.append('-s=' + serial)
      check_call_with_log(setting_cmd, 'device_log')
      setting_cmd.pop()
  else:
    check_call_with_log(setting_cmd, 'device_log')

  logging.info('CPU mode and frequency set successfully!')


def log_ambiguous_args():
  logging.error('The count of arguments does not match!')
  raise ValueError('The count of arguments does not match.')


# Check whether the counts of the build arguments are ambiguous.  The numbers
# of -c/-l, -o, -f and --ldflags should either all be 0 or all be the same.
def check_count(compiler, llvm_version, build_os, cflags, ldflags):
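  """Return the number of toolchain settings, validating argument counts."""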
  # Count will be 0 if neither compiler nor llvm_version is specified.
  # Otherwise, one of these two lists is empty and count is the length of
  # the other one.
  count = max(len(compiler), len(llvm_version))

  # Check whether the number of cflags is 0 or matches the count so far.
  if len(cflags) != 0:
    if count != 0 and len(cflags) != count:
      log_ambiguous_args()
    count = len(cflags)

  if len(ldflags) != 0:
    if count != 0 and len(ldflags) != count:
      log_ambiguous_args()
    count = len(ldflags)

  if len(build_os) != 0:
    if count != 0 and len(build_os) != count:
      log_ambiguous_args()
    count = len(build_os)

  # If no settings are passed, run the default setting only once.
  return max(1, count)


# Build benchmark binary with toolchain settings
def build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
                ldflags):
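  """Build the benchmark locally by invoking build_bench.py."""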
  # Build benchmark locally
  build_cmd = ['./build_bench.py', '-b=' + bench]
  if compiler:
    build_cmd.append('-c=' + compiler[setting_no])
  if llvm_version:
    build_cmd.append('-l=' + llvm_version[setting_no])
  if build_os:
    build_cmd.append('-o=' + build_os[setting_no])
  if cflags:
    build_cmd.append('-f=' + cflags[setting_no])
  if ldflags:
    build_cmd.append('--ldflags=' + ldflags[setting_no])

  logging.info('Building benchmark for toolchain setting No.%d...', setting_no)
  logging.info('Command: %s', build_cmd)

  try:
    subprocess.check_call(build_cmd)
  except subprocess.CalledProcessError:
    logging.error('Error while building benchmark!')
    raise


def run_and_collect_result(test_cmd, setting_no, i, bench, serial='default'):
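  """Run one benchmark iteration on the DUT and rename its raw result file."""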

  # Run autotest script for benchmark on DUT
  check_call_with_log(test_cmd, 'test_log')

  logging.info('Benchmark with setting No.%d, iter.%d finished testing on '
               'device %s.', setting_no, i, serial)

  # Rename the bench_result file generated by autotest
  bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
  if not os.path.exists(bench_result):
    logging.error('No result found at %s, '
                  'please check test_log for details.', bench_result)
    raise OSError('Result file %s not found.' % bench_result)

  new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial, setting_no, i)
  new_bench_result_path = os.path.join(config.bench_suite_dir, new_bench_result)
  try:
    os.rename(bench_result, new_bench_result_path)
  except OSError:
    logging.error('Error while renaming raw result %s to %s', bench_result,
                  new_bench_result_path)
    raise

  logging.info('Benchmark result saved at %s.', new_bench_result_path)


def test_bench(bench, setting_no, iterations, serials, remote, mode):
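  """Run the benchmark on the DUT for the given number of iterations."""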
  logging.info('Start running benchmark on device...')

  # Run benchmark and tests on DUT
  for i in xrange(iterations):
    logging.info('Iteration No.%d:', i)
    test_cmd = [
        os.path.join(
            os.path.join(config.android_home, config.autotest_dir),
            'site_utils/test_bench.py')
    ]
    test_cmd.append('-b=' + bench)
    test_cmd.append('-r=' + remote)
    test_cmd.append('-m=' + mode)

    # Deal with serials.
    # If no serials are specified, try to run the test on the only device.
    # If specified, split them into a list and run the test on each device.
    if serials:
      for serial in serials.split(','):
        test_cmd.append('-s=' + serial)

        run_and_collect_result(test_cmd, setting_no, i, bench, serial)
        test_cmd.pop()
    else:
      run_and_collect_result(test_cmd, setting_no, i, bench)


def gen_json(bench, setting_no, iterations, serials):
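  """Convert raw bench_result files into a Crosperf-compatible JSON file."""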
  bench_result = os.path.join(config.bench_suite_dir, 'bench_result')

  logging.info('Generating JSON file for Crosperf...')

  if not serials:
    serials = 'default'

  for serial in serials.split(','):

    # Platform will be used as device lunch combo instead
    #experiment = '_'.join([serial, str(setting_no)])
    experiment = config.product_combo

    # Input format: bench_result_{bench}_{serial}_{setting_no}_
    input_file = '_'.join([bench_result, bench, serial, str(setting_no), ''])
    gen_json_cmd = [
        './gen_json.py', '--input=' + input_file,
        '--output=%s.json' % os.path.join(config.bench_suite_dir, bench),
        '--bench=' + bench, '--platform=' + experiment,
        '--iterations=' + str(iterations)
    ]

    logging.info('Command: %s', gen_json_cmd)
    if subprocess.call(gen_json_cmd):
      logging.error('Error while generating JSON file, please check the raw '
                    'data of the results at %s.', input_file)


def gen_crosperf(infile, outfile):
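  """Generate an HTML report from the JSON results using generate_report.py."""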
  # Set environment variable for crosperf
  os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)

  logging.info('Generating Crosperf Report...')
  crosperf_cmd = [
      os.path.join(config.toolchain_utils, 'generate_report.py'),
      '-i=' + infile, '-o=' + outfile, '-f'
  ]

  # Run crosperf generate_report.py
  logging.info('Command: %s', crosperf_cmd)
  subprocess.call(crosperf_cmd)

  logging.info('Report generated successfully!')
  logging.info('Report location: %s.html in the bench suite directory.',
               outfile)


def main(argv):
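  """Entry point: set up the environment, then build, test and report."""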
  # Set environment variable for the local location of the benchmark suite.
  # This is for collecting test results into the benchmark suite directory.
  os.environ['BENCH_SUITE_DIR'] = config.bench_suite_dir

  # Set Android type, used to distinguish between AOSP and internal builds.
  os.environ['ANDROID_TYPE'] = config.android_type

  # Set ANDROID_HOME for both building and testing.
  os.environ['ANDROID_HOME'] = config.android_home

  # Set environment variable for the architecture; this will be used in
  # autotest.
  os.environ['PRODUCT'] = config.product

  arguments = _parse_arguments(argv)

  bench_list = arguments.bench
  if not bench_list:
    bench_list = config.bench_list

  compiler = arguments.compiler_dir
  build_os = arguments.build_os
  llvm_version = arguments.llvm_prebuilts_version
  cflags = arguments.cflags
  ldflags = arguments.ldflags
  iterations = arguments.iterations
  serials = arguments.serials
  remote = arguments.remote
  frequency = arguments.frequency
  mode = arguments.mode
  keep = arguments.keep

  # Clear old logs every time before running the script
  clear_logs()

  if keep == 'False':
    clear_results()

  # Set test mode and frequency of CPU on the DUT
  set_device(serials, remote, frequency)

  test = arguments.test
  # If a test configuration file has been given, use the build settings
  # in the configuration file and run the test.
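  # A minimal sketch of such a configuration file (INI format parsed by
  # ConfigParser; the section name and values are hypothetical, but the keys
  # match the ones read below):
  #
  #   [setting0]
  #   bench = Panorama
  #   compiler = /path/to/clang/bin
  #   build_os = linux-x86
  #   llvm_version =
  #   cflags = -O3
  #   ldflags =
  #   iterations = 3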
  if test:
    test_config = ConfigParser.ConfigParser(allow_no_value=True)
    if not test_config.read(test):
      logging.error('Error while reading from building '
                    'configuration file %s.', test)
      raise RuntimeError('Error while reading configuration file %s.' % test)

    for setting_no, section in enumerate(test_config.sections()):
      bench = test_config.get(section, 'bench')
      compiler = [test_config.get(section, 'compiler')]
      build_os = [test_config.get(section, 'build_os')]
      llvm_version = [test_config.get(section, 'llvm_version')]
      cflags = [test_config.get(section, 'cflags')]
      ldflags = [test_config.get(section, 'ldflags')]

      # Set iterations from the test_config file; if not present, use the
      # value from the command line.
      it = test_config.get(section, 'iterations')
      if not it:
        it = iterations
      it = int(it)

      # Build benchmark for each single test configuration
      build_bench(0, bench, compiler, llvm_version, build_os, cflags, ldflags)

      test_bench(bench, setting_no, it, serials, remote, mode)

      gen_json(bench, setting_no, it, serials)

    for bench in config.bench_list:
      infile = os.path.join(config.bench_suite_dir, bench + '.json')
      if os.path.exists(infile):
        outfile = os.path.join(config.bench_suite_dir, bench + '_report')
        gen_crosperf(infile, outfile)

    # Stop the script if only a config file was provided
    return 0

  # If no configuration file is specified, continue running.
  # Check whether the counts of the setting arguments are ambiguous.
  setting_count = check_count(compiler, llvm_version, build_os, cflags, ldflags)

  for bench in bench_list:
    logging.info('Start building and running benchmark: [%s]', bench)
    # Run the script for each toolchain setting
    for setting_no in xrange(setting_count):
      build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
                  ldflags)

      # Run autotest script for benchmark test on device
      test_bench(bench, setting_no, iterations, serials, remote, mode)

      gen_json(bench, setting_no, iterations, serials)

    infile = os.path.join(config.bench_suite_dir, bench + '.json')
    outfile = os.path.join(config.bench_suite_dir, bench + '_report')
    gen_crosperf(infile, outfile)


if __name__ == '__main__':
  main(sys.argv[1:])