summary refs log tree commit diff
path: root/build
diff options
context:
space:
mode:
author	Torne (Richard Coles) <torne@google.com>	2013-08-23 16:39:15 +0100
committer	Torne (Richard Coles) <torne@google.com>	2013-08-23 16:39:15 +0100
commit	3551c9c881056c480085172ff9840cab31610854 (patch)
tree	23660320f5f4c279966609cf9da7491b96d10ca8 /build
parent	4e9d9adbbb6cf287125ca44a0823791a570472f5 (diff)
download	chromium_org-3551c9c881056c480085172ff9840cab31610854.tar.gz
Merge from Chromium at DEPS revision r219274
This commit was generated by merge_to_master.py. Change-Id: Ibb7f41396cadf4071e89153e1913c986d126f65d
Diffstat (limited to 'build')
-rw-r--r--build/all.gyp19
-rw-r--r--build/all_android.gyp20
-rwxr-xr-xbuild/android/adb_profile_chrome7
-rw-r--r--build/android/ant/apk-package.xml55
-rwxr-xr-xbuild/android/bb_run_sharded_steps.py213
-rwxr-xr-xbuild/android/buildbot/bb_device_steps.py74
-rwxr-xr-xbuild/android/buildbot/bb_host_steps.py1
-rwxr-xr-xbuild/android/buildbot/bb_run_bot.py13
-rw-r--r--build/android/buildbot/bb_utils.py2
-rw-r--r--build/android/dex_action.gypi2
-rwxr-xr-xbuild/android/envsetup.sh8
-rwxr-xr-xbuild/android/envsetup_functions.sh18
-rw-r--r--build/android/findbugs_filter/findbugs_exclude.xml8
-rwxr-xr-xbuild/android/generate_emma_html.py100
-rwxr-xr-xbuild/android/gyp/ant.py4
-rwxr-xr-xbuild/android/gyp/dex.py21
-rwxr-xr-xbuild/android/gyp/emma_instr.py188
-rwxr-xr-xbuild/android/gyp/proguard.py55
-rw-r--r--build/android/instr_action.gypi51
-rw-r--r--build/android/pylib/android_commands.py46
-rw-r--r--build/android/pylib/base/base_test_runner.py10
-rw-r--r--build/android/pylib/base/test_dispatcher.py53
-rw-r--r--build/android/pylib/base/test_dispatcher_unittest.py8
-rw-r--r--build/android/pylib/chrome_test_server_spawner.py14
-rw-r--r--build/android/pylib/constants.py16
-rw-r--r--build/android/pylib/device_stats_monitor.py5
-rw-r--r--build/android/pylib/fake_dns.py6
-rw-r--r--build/android/pylib/gtest/gtest_config.py4
-rw-r--r--build/android/pylib/gtest/setup.py27
-rw-r--r--build/android/pylib/gtest/test_options.py1
-rw-r--r--build/android/pylib/gtest/test_package_apk.py6
-rw-r--r--build/android/pylib/gtest/test_package_exe.py6
-rw-r--r--build/android/pylib/gtest/test_runner.py1
-rw-r--r--build/android/pylib/host_driven/setup.py1
-rw-r--r--build/android/pylib/host_driven/test_case.py64
-rw-r--r--build/android/pylib/host_driven/test_runner.py11
-rw-r--r--build/android/pylib/host_driven/tests_annotations.py2
-rw-r--r--build/android/pylib/instrumentation/test_options.py2
-rw-r--r--build/android/pylib/instrumentation/test_runner.py29
-rw-r--r--build/android/pylib/monkey/test_options.py1
-rw-r--r--build/android/pylib/monkey/test_runner.py2
-rw-r--r--build/android/pylib/perf/__init__.py3
-rw-r--r--build/android/pylib/perf/setup.py74
-rw-r--r--build/android/pylib/perf/test_options.py13
-rw-r--r--build/android/pylib/perf/test_runner.py155
-rw-r--r--build/android/pylib/uiautomator/test_options.py1
-rw-r--r--build/android/pylib/uiautomator/test_runner.py2
-rw-r--r--build/android/pylib/utils/command_option_parser.py76
-rw-r--r--build/android/pylib/utils/report_results.py9
-rwxr-xr-xbuild/android/run_browser_tests.py23
-rwxr-xr-xbuild/android/run_instrumentation_tests.py23
-rwxr-xr-xbuild/android/run_tests.py23
-rwxr-xr-xbuild/android/run_uiautomator_tests.py24
-rwxr-xr-xbuild/android/test_runner.py198
-rwxr-xr-xbuild/android/update_verification.py (renamed from build/android/run_update_verification.py)2
-rw-r--r--build/common.gypi35
-rw-r--r--build/filename_rules.gypi2
-rwxr-xr-xbuild/gdb-add-index124
-rwxr-xr-xbuild/get_landmines.py63
-rw-r--r--build/ios/grit_whitelist.txt4
-rw-r--r--build/java.gypi94
-rw-r--r--build/java_apk.gypi44
-rw-r--r--build/java_prebuilt.gypi55
-rw-r--r--build/landmine_utils.py114
-rwxr-xr-xbuild/landmines.py160
-rw-r--r--build/linux/system.gyp25
-rw-r--r--build/linux/unbundle/README28
-rw-r--r--build/linux/unbundle/openssl.gyp25
-rwxr-xr-xbuild/linux/unbundle/remove_bundled_libraries.py87
-rwxr-xr-xbuild/linux/unbundle/replace_gyp_files.py1
-rw-r--r--build/shim_headers.gypi7
-rw-r--r--build/slave/OWNERS24
-rw-r--r--build/slave/README8
-rw-r--r--build/util/LASTCHANGE2
-rw-r--r--build/util/LASTCHANGE.blink2
-rw-r--r--build/whitespace_file.txt1
76 files changed, 1906 insertions, 799 deletions
diff --git a/build/all.gyp b/build/all.gyp
index d90b5a5795..e81c9c86a7 100644
--- a/build/all.gyp
+++ b/build/all.gyp
@@ -342,6 +342,14 @@
'../chrome/chrome.gyp:performance_browser_tests',
'../chrome/chrome.gyp:performance_ui_tests',
'../chrome/chrome.gyp:sync_performance_tests',
+ '../tools/perf/clear_system_cache/clear_system_cache.gyp:*',
+ ],
+ 'conditions': [
+ ['OS!="ios" and OS!="win"', {
+ 'dependencies': [
+ '../breakpad/breakpad.gyp:minidump_stackwalk',
+ ],
+ }],
],
}, # target_name: chromium_builder_perf
{
@@ -466,8 +474,8 @@
# We refer to content_shell directly rather than all_webkit
# because we don't want the _unittests binaries.
- '../content/content.gyp:content_browsertests',
- '../content/content.gyp:content_shell',
+ '../content/content.gyp:content_browsertests',
+ '../content/content.gyp:content_shell',
'../net/net.gyp:dns_fuzz_stub',
],
@@ -514,6 +522,7 @@
'../sync/sync.gyp:sync_unit_tests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
+ '../tools/perf/clear_system_cache/clear_system_cache.gyp:*',
'../ui/ui.gyp:ui_unittests',
'../url/url.gyp:url_unittests',
'../webkit/renderer/compositor_bindings/compositor_bindings_tests.gyp:webkit_compositor_bindings_unittests',
@@ -545,6 +554,7 @@
'../sync/sync.gyp:sync_unit_tests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
+ '../tools/perf/clear_system_cache/clear_system_cache.gyp:*',
'../ui/ui.gyp:ui_unittests',
'../url/url.gyp:url_unittests',
'../webkit/renderer/compositor_bindings/compositor_bindings_tests.gyp:webkit_compositor_bindings_unittests',
@@ -613,6 +623,7 @@
'dependencies': [
'../cc/cc_tests.gyp:cc_unittests',
'../chrome/chrome.gyp:browser_tests',
+ '../chrome/chrome.gyp:gcapi_test',
'../chrome/chrome.gyp:installer_util_unittests',
'../chrome/chrome.gyp:interactive_ui_tests',
'../chrome/chrome.gyp:mini_installer_test',
@@ -641,6 +652,7 @@
'../sync/sync.gyp:sync_unit_tests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
+ '../tools/perf/clear_system_cache/clear_system_cache.gyp:*',
'../ui/ui.gyp:ui_unittests',
'../ui/views/views.gyp:views_unittests',
'../url/url.gyp:url_unittests',
@@ -743,7 +755,9 @@
'type': 'none',
'dependencies': [
'../chrome/chrome.gyp:crash_service',
+ '../chrome/chrome.gyp:gcapi_dll',
'../chrome/chrome.gyp:policy_templates',
+ '../courgette/courgette.gyp:courgette64',
'../chrome/installer/mini_installer.gyp:mini_installer',
'../courgette/courgette.gyp:courgette',
'../cloud_print/cloud_print.gyp:cloud_print',
@@ -767,7 +781,6 @@
'dependencies': [
'../chrome/chrome.gyp:crash_service_win64',
'../chrome_frame/chrome_frame.gyp:npchrome_frame',
- '../courgette/courgette.gyp:courgette64',
# Omitting tests from Win64 to speed up cycle times.
'../chrome/chrome.gyp:automated_ui_tests',
'../chrome/chrome.gyp:chromedriver',
diff --git a/build/all_android.gyp b/build/all_android.gyp
index 02965807a9..70124e04c4 100644
--- a/build/all_android.gyp
+++ b/build/all_android.gyp
@@ -40,8 +40,8 @@
{
# The current list of tests for android. This is temporary
# until the full set supported. If adding a new test here,
- # please also add it to build/android/run_tests.py, else the
- # test is not run.
+ # please also add it to build/android/pylib/gtest/gtest_config.py,
+ # else the test is not run.
#
# WARNING:
# Do not add targets here without communicating the implications
@@ -116,6 +116,22 @@
],
},
{
+ # WebRTC Android APK tests.
+ 'target_name': 'android_builder_webrtc',
+ 'type': 'none',
+ 'variables': {
+ # WebRTC tests are normally not built by Chromium bots.
+ 'include_tests%': 0,
+ },
+ 'conditions': [
+ ['"<(gtest_target_type)"=="shared_library" and include_tests==1', {
+ 'dependencies': [
+ '../third_party/webrtc/build/apk_tests.gyp:*',
+ ],
+ }],
+ ],
+ }, # target_name: android_builder_webrtc
+ {
# Experimental / in-progress targets that are expected to fail
# but we still try to compile them on bots (turning the stage
# orange, not red).
diff --git a/build/android/adb_profile_chrome b/build/android/adb_profile_chrome
index c4445d1772..79a3d5dc69 100755
--- a/build/android/adb_profile_chrome
+++ b/build/android/adb_profile_chrome
@@ -22,6 +22,8 @@ usage() {
echo " (Default is /sdcard/Download/chrome-profile-results-*)"
echo " --categories|-c C Select categories to trace with comma-delimited wildcards."
echo " e.g. '*', 'cat1*,-cat1a'. Default is '*'."
+ echo " --continuous Using the trace buffer as a ring buffer, continuously"
+ echo " profile until stopped."
echo " --stop Stop profiling."
echo " --download|-d Download latest trace."
echo " --time|-t N Profile for N seconds and download the resulting trace."
@@ -110,6 +112,7 @@ while test -n "$1"; do
CATEGORIES="-e categories '$2'"
shift
;;
+ --continuous) CONTINUOUS="-e continuous ." ;;
-t|--time)
shift
if [ -z "$1" ] ; then
@@ -131,9 +134,9 @@ if [ -z "${INTERVAL}" ] ; then
if [ -z "${FUNCTION}" ] ; then
usage
else
- send_intent ${PACKAGE} ${FUNCTION} ${OUTPUT} ${CATEGORIES}
+ send_intent ${PACKAGE} ${FUNCTION} ${OUTPUT} ${CATEGORIES} ${CONTINUOUS}
fi
else
- do_timed_capture ${PACKAGE} ${INTERVAL} ${CATEGORIES}
+ do_timed_capture ${PACKAGE} ${INTERVAL} ${CATEGORIES} ${CONTINUOUS}
fi
exit 0
diff --git a/build/android/ant/apk-package.xml b/build/android/ant/apk-package.xml
index eeb156c94f..69f5501552 100644
--- a/build/android/ant/apk-package.xml
+++ b/build/android/ant/apk-package.xml
@@ -21,6 +21,11 @@
<property name="out.absolute.dir" location="${out.dir}" />
<property name="sdk.dir" location="${ANDROID_SDK_ROOT}"/>
+ <property name="emma.device.jar" location="${EMMA_DEVICE_JAR}" />
+
+ <condition property="emma.enabled" value="true" else="false">
+ <equals arg1="${EMMA_INSTRUMENT}" arg2="1"/>
+ </condition>
<!-- jar file from where the tasks are loaded -->
<path id="android.antlibs">
@@ -54,21 +59,43 @@
<property name="source.dir" value="${SOURCE_DIR}" />
<property name="source.absolute.dir" location="${source.dir}" />
+ <!-- Macro that enables passing a variable list of external jar files
+ to ApkBuilder. -->
+ <macrodef name="package-helper">
+ <element name="extra-jars" optional="yes" />
+ <sequential>
+ <apkbuilder
+ outfolder="${out.absolute.dir}"
+ resourcefile="${resource.package.file.name}"
+ apkfilepath="${out.packaged.file}"
+ debugpackaging="${build.is.packaging.debug}"
+ debugsigning="${build.is.signing.debug}"
+ verbose="${verbose}"
+ hascode="true"
+ previousBuildType="/"
+ buildType="${build.is.packaging.debug}/${build.is.signing.debug}">
+ <dex path="${intermediate.dex.file}"/>
+ <sourcefolder path="${source.absolute.dir}"/>
+ <nativefolder path="${native.libs.absolute.dir}" />
+ <extra-jars/>
+ </apkbuilder>
+ </sequential>
+ </macrodef>
+
+
<!-- Packages the application. -->
<target name="-package">
- <apkbuilder
- outfolder="${out.absolute.dir}"
- resourcefile="${resource.package.file.name}"
- apkfilepath="${out.packaged.file}"
- debugpackaging="${build.is.packaging.debug}"
- debugsigning="${build.is.signing.debug}"
- verbose="${verbose}"
- hascode="true"
- previousBuildType="/"
- buildType="${build.is.packaging.debug}/${build.is.signing.debug}">
- <dex path="${intermediate.dex.file}"/>
- <sourcefolder path="${source.absolute.dir}"/>
- <nativefolder path="${native.libs.absolute.dir}" />
- </apkbuilder>
+ <if condition="${emma.enabled}">
+ <then>
+ <package-helper>
+ <extra-jars>
+ <jarfile path="${emma.device.jar}" />
+ </extra-jars>
+ </package-helper>
+ </then>
+ <else>
+ <package-helper />
+ </else>
+ </if>
</target>
</project>
diff --git a/build/android/bb_run_sharded_steps.py b/build/android/bb_run_sharded_steps.py
index 9b36957068..6124819cb5 100755
--- a/build/android/bb_run_sharded_steps.py
+++ b/build/android/bb_run_sharded_steps.py
@@ -4,187 +4,16 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""Helper script to shard build bot steps and save results to disk.
-
-Our buildbot infrastructure requires each slave to run steps serially.
-This is sub-optimal for android, where these steps can run independently on
-multiple connected devices.
-
-The buildbots will run this script multiple times per cycle:
-- First: all steps listed in -s in will be executed in parallel using all
-connected devices. Step results will be pickled to disk. Each step has a unique
-name. The result code will be ignored if the step name is listed in
---flaky_steps.
-The buildbot will treat this step as a regular step, and will not process any
-graph data.
-
-- Then, with -p STEP_NAME: at this stage, we'll simply print the file with the
-step results previously saved. The buildbot will then process the graph data
-accordingly.
-
-The JSON steps file contains a dictionary in the format:
-{
- "step_name_foo": "script_to_execute foo",
- "step_name_bar": "script_to_execute bar"
-}
-
-The JSON flaky steps file contains a list with step names which results should
-be ignored:
-[
- "step_name_foo",
- "step_name_bar"
-]
-
-Note that script_to_execute necessarily have to take at least the following
-options:
- --device: the serial number to be passed to all adb commands.
- --keep_test_server_ports: indicates it's being run as a shard, and shouldn't
- reset test server port allocation.
+"""DEPRECATED!
+TODO(bulach): remove me once all other repositories reference
+'test_runner.py perf' directly.
"""
-
-import datetime
-import json
-import logging
-import multiprocessing
import optparse
-import pexpect
-import pickle
import os
-import signal
-import shutil
import sys
-import time
-from pylib import android_commands
from pylib import cmd_helper
-from pylib import constants
-from pylib import forwarder
-from pylib import ports
-
-
-_OUTPUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out', 'step_results')
-
-
-def _SaveResult(result):
- with file(os.path.join(_OUTPUT_DIR, result['name']), 'w') as f:
- f.write(pickle.dumps(result))
-
-
-def _RunStepsPerDevice(steps):
- results = []
- for step in steps:
- start_time = datetime.datetime.now()
- print 'Starting %s: %s %s at %s' % (step['name'], step['cmd'],
- start_time, step['device'])
- output, exit_code = pexpect.run(
- step['cmd'], cwd=os.path.abspath(constants.DIR_SOURCE_ROOT),
- withexitstatus=True, logfile=sys.stdout, timeout=1800,
- env=os.environ)
- exit_code = exit_code or 0
- end_time = datetime.datetime.now()
- exit_msg = '%s %s' % (exit_code,
- '(ignored, flaky step)' if step['is_flaky'] else '')
- print 'Finished %s: %s %s %s at %s' % (step['name'], exit_msg, step['cmd'],
- end_time, step['device'])
- if step['is_flaky']:
- exit_code = 0
- result = {'name': step['name'],
- 'output': output,
- 'exit_code': exit_code,
- 'total_time': (end_time - start_time).seconds,
- 'device': step['device']}
- _SaveResult(result)
- results += [result]
- return results
-
-
-def _RunShardedSteps(steps, flaky_steps, devices):
- assert steps
- assert devices, 'No devices connected?'
- if os.path.exists(_OUTPUT_DIR):
- assert '/step_results' in _OUTPUT_DIR
- shutil.rmtree(_OUTPUT_DIR)
- if not os.path.exists(_OUTPUT_DIR):
- os.makedirs(_OUTPUT_DIR)
- step_names = sorted(steps.keys())
- all_params = []
- num_devices = len(devices)
- shard_size = (len(steps) + num_devices - 1) / num_devices
- for i, device in enumerate(devices):
- steps_per_device = []
- for s in steps.keys()[i * shard_size:(i + 1) * shard_size]:
- steps_per_device += [{'name': s,
- 'device': device,
- 'is_flaky': s in flaky_steps,
- 'cmd': steps[s] + ' --device ' + device +
- ' --keep_test_server_ports'}]
- all_params += [steps_per_device]
- print 'Start sharding (note: output is not synchronized...)'
- print '*' * 80
- start_time = datetime.datetime.now()
- pool = multiprocessing.Pool(processes=num_devices)
- async_results = pool.map_async(_RunStepsPerDevice, all_params)
- results_per_device = async_results.get(999999)
- end_time = datetime.datetime.now()
- print '*' * 80
- print 'Finished sharding.'
- print 'Summary'
- total_time = 0
- for results in results_per_device:
- for result in results:
- print('%s : exit_code=%d in %d secs at %s' %
- (result['name'], result['exit_code'], result['total_time'],
- result['device']))
- total_time += result['total_time']
- print 'Step time: %d secs' % ((end_time - start_time).seconds)
- print 'Bots time: %d secs' % total_time
- # No exit_code for the sharding step: the individual _PrintResults step
- # will return the corresponding exit_code.
- return 0
-
-
-def _PrintStepOutput(step_name):
- file_name = os.path.join(_OUTPUT_DIR, step_name)
- if not os.path.exists(file_name):
- print 'File not found ', file_name
- return 1
- with file(file_name, 'r') as f:
- result = pickle.loads(f.read())
- print result['output']
- return result['exit_code']
-
-
-def _PrintAllStepsOutput(steps):
- with file(steps, 'r') as f:
- steps = json.load(f)
- ret = 0
- for step_name in steps.keys():
- ret |= _PrintStepOutput(step_name)
- return ret
-
-
-def _KillPendingServers():
- for retry in range(5):
- for server in ['lighttpd', 'web-page-replay']:
- pids = cmd_helper.GetCmdOutput(['pgrep', '-f', server])
- pids = [pid.strip() for pid in pids.split('\n') if pid.strip()]
- for pid in pids:
- try:
- logging.warning('Killing %s %s', server, pid)
- os.kill(int(pid), signal.SIGQUIT)
- except Exception as e:
- logging.warning('Failed killing %s %s %s', server, pid, e)
- # Restart the adb server with taskset to set a single CPU affinity.
- cmd_helper.RunCmd(['adb', 'kill-server'])
- cmd_helper.RunCmd(['taskset', '-c', '0', 'adb', 'start-server'])
- cmd_helper.RunCmd(['taskset', '-c', '0', 'adb', 'root'])
- i = 1
- while not android_commands.GetAttachedDevices():
- time.sleep(i)
- i *= 2
- if i > 10:
- break
def main(argv):
@@ -198,39 +27,15 @@ def main(argv):
parser.add_option('-p', '--print_results',
help='Only prints the results for the previously '
'executed step, do not run it again.')
- parser.add_option('-P', '--print_all',
- help='Only prints the results for the previously '
- 'executed steps, do not run them again.')
options, urls = parser.parse_args(argv)
if options.print_results:
- return _PrintStepOutput(options.print_results)
- if options.print_all:
- return _PrintAllStepsOutput(options.print_all)
-
- # At this point, we should kill everything that may have been left over from
- # previous runs.
- _KillPendingServers()
-
- forwarder.Forwarder.UseMultiprocessing()
-
- # Reset the test port allocation. It's important to do it before starting
- # to dispatch any step.
- if not ports.ResetTestServerPortAllocation():
- raise Exception('Failed to reset test server port.')
-
- # Sort the devices so that we'll try to always run a step in the same device.
- devices = sorted(android_commands.GetAttachedDevices())
- if not devices:
- print 'You must attach a device'
- return 1
-
- with file(options.steps, 'r') as f:
- steps = json.load(f)
- flaky_steps = []
+ return cmd_helper.RunCmd(['build/android/test_runner.py', 'perf',
+ '--print-step', options.print_results])
+ flaky_options = []
if options.flaky_steps:
- with file(options.flaky_steps, 'r') as f:
- flaky_steps = json.load(f)
- return _RunShardedSteps(steps, flaky_steps, devices)
+ flaky_options = ['--flaky-steps', options.flaky_steps]
+ return cmd_helper.RunCmd(['build/android/test_runner.py', 'perf', '-v',
+ '--steps', options.steps] + flaky_options)
if __name__ == '__main__':
diff --git a/build/android/buildbot/bb_device_steps.py b/build/android/buildbot/bb_device_steps.py
index 32438ae8fc..2cb431ed88 100755
--- a/build/android/buildbot/bb_device_steps.py
+++ b/build/android/buildbot/bb_device_steps.py
@@ -5,8 +5,10 @@
import collections
import glob
+import hashlib
import multiprocessing
import os
+import random
import shutil
import sys
@@ -124,7 +126,10 @@ def RunChromeDriverTests(_):
"""Run all the steps for running chromedriver tests."""
bb_annotations.PrintNamedStep('chromedriver_annotation')
RunCmd(['chrome/test/chromedriver/run_buildbot_steps.py',
- '--android-package=%s' % constants.CHROMIUM_TEST_SHELL_PACKAGE])
+ '--android-packages=%s,%s,%s' %
+ (constants.CHROMIUM_TEST_SHELL_PACKAGE,
+ constants.CHROME_STABLE_PACKAGE,
+ constants.CHROME_BETA_PACKAGE)])
def InstallApk(options, test, print_step=False):
"""Install an apk to all phones.
@@ -165,8 +170,10 @@ def RunInstrumentationSuite(options, test, flunk_on_failure=True,
if options.flakiness_server:
args.append('--flakiness-dashboard-server=%s' %
options.flakiness_server)
+ if options.coverage_bucket:
+ args.append('--coverage-dir=%s' % options.coverage_dir)
if test.host_driven_root:
- args.append('--python_test_root=%s' % test.host_driven_root)
+ args.append('--host-driven-root=%s' % test.host_driven_root)
if test.annotation:
args.extend(['-A', test.annotation])
if test.exclude_annotation:
@@ -223,8 +230,7 @@ def RunWebkitLayoutTests(options):
cmd_args.extend(
['--additional-expectations=%s' % os.path.join(CHROME_SRC, *f)])
- RunCmd(['webkit/tools/layout_tests/run_webkit_tests.py'] + cmd_args,
- flunk_on_failure=False)
+ RunCmd(['webkit/tools/layout_tests/run_webkit_tests.py'] + cmd_args)
def SpawnLogcatMonitor():
@@ -237,12 +243,14 @@ def SpawnLogcatMonitor():
RunCmd(['sleep', '5'])
def ProvisionDevices(options):
- # Restart adb to work around bugs, sleep to wait for usb discovery.
- RunCmd(['adb', 'kill-server'])
- RunCmd(['adb', 'start-server'])
- RunCmd(['sleep', '1'])
-
bb_annotations.PrintNamedStep('provision_devices')
+
+ if not bb_utils.TESTING:
+ # Restart adb to work around bugs, sleep to wait for usb discovery.
+ adb = android_commands.AndroidCommands()
+ adb.RestartAdbServer()
+ RunCmd(['sleep', '1'])
+
if options.reboot:
RebootDevices()
provision_cmd = ['build/android/provision_devices.py', '-t', options.target]
@@ -293,6 +301,45 @@ def GetTestStepCmds():
]
+def UploadCoverageData(options, path, coverage_type):
+ """Uploads directory at |path| to Google Storage.
+
+ The directory at path should ostensibly contain HTML coverage data.
+
+ Args:
+ options: Command line options.
+ path: Path to the directory to be uploaded.
+ coverage_type: String used as the first component of the url.
+
+ Returns:
+ None.
+ """
+ revision = options.build_properties.get('got_revision')
+ if not revision:
+ revision = options.build_properties.get('revision', 'testing')
+ bot_id = options.build_properties.get('buildername', 'testing')
+ randhash = hashlib.sha1(str(random.random())).hexdigest()
+ gs_path = '%s/%s/%s/%s/%s' % (options.coverage_bucket, coverage_type,
+ bot_id, revision, randhash)
+
+ RunCmd([bb_utils.GSUTIL_PATH, 'cp', '-R', path, 'gs://%s' % gs_path])
+ bb_annotations.PrintLink(
+ 'Coverage report',
+ 'https://storage.googleapis.com/%s/index.html' % gs_path)
+
+
+def GenerateJavaCoverageReport(options):
+ """Generates an HTML coverage report using EMMA and uploads it."""
+ bb_annotations.PrintNamedStep('java_coverage_report')
+
+ coverage_html = os.path.join(options.coverage_dir, 'coverage_html')
+ RunCmd(['build/android/generate_emma_html.py',
+ '--coverage-dir', options.coverage_dir,
+ '--metadata-dir', os.path.join(CHROME_SRC, 'out', options.target),
+ '--output', os.path.join(coverage_html, 'index.html')])
+ UploadCoverageData(options, coverage_html, 'java')
+
+
def LogcatDump(options):
# Print logcat, kill logcat monitor
bb_annotations.PrintNamedStep('logcat_dump')
@@ -328,6 +375,9 @@ def MainTestWrapper(options):
if options.test_filter:
bb_utils.RunSteps(options.test_filter, GetTestStepCmds(), options)
+ if options.coverage_bucket:
+ GenerateJavaCoverageReport(options)
+
if options.experimental:
RunTestSuites(options, gtest_config.EXPERIMENTAL_TEST_SUITES)
@@ -353,6 +403,9 @@ def GetDeviceStepsOptParser():
help='Install an apk by name')
parser.add_option('--reboot', action='store_true',
help='Reboot devices before running tests')
+ parser.add_option('--coverage-bucket',
+ help=('Bucket name to store coverage results. Coverage is '
+ 'only run if this is set.'))
parser.add_option(
'--flakiness-server',
help='The flakiness dashboard server to which the results should be '
@@ -379,6 +432,9 @@ def main(argv):
return sys.exit('Unknown tests %s' % list(unknown_tests))
setattr(options, 'target', options.factory_properties.get('target', 'Debug'))
+ if options.coverage_bucket:
+ setattr(options, 'coverage_dir',
+ os.path.join(CHROME_SRC, 'out', options.target, 'coverage'))
MainTestWrapper(options)
diff --git a/build/android/buildbot/bb_host_steps.py b/build/android/buildbot/bb_host_steps.py
index a28f5765e4..adb9ca0568 100755
--- a/build/android/buildbot/bb_host_steps.py
+++ b/build/android/buildbot/bb_host_steps.py
@@ -96,7 +96,6 @@ def FindBugs(options):
def BisectPerfRegression(_):
- bb_annotations.PrintNamedStep('Bisect Perf Regression')
RunCmd([SrcPath('tools', 'prepare-bisect-perf-regression.py'),
'-w', os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)])
RunCmd([SrcPath('tools', 'run-bisect-perf-regression.py'),
diff --git a/build/android/buildbot/bb_run_bot.py b/build/android/buildbot/bb_run_bot.py
index 7637024693..23db7ec2ba 100755
--- a/build/android/buildbot/bb_run_bot.py
+++ b/build/android/buildbot/bb_run_bot.py
@@ -19,6 +19,8 @@ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pylib import constants
+CHROMIUM_COVERAGE_BUCKET = 'chromium-code-coverage'
+
_BotConfig = collections.namedtuple(
'BotConfig', ['bot_id', 'host_obj', 'test_obj'])
@@ -144,12 +146,14 @@ def GetBotStepMap():
B('fyi-x86-builder-dbg',
H(compile_step + std_host_tests, experimental, target_arch='x86')),
B('fyi-builder-dbg',
- H(std_build_steps + std_host_tests, experimental)),
+ H(std_build_steps + std_host_tests, experimental,
+ extra_gyp='emma_coverage=1')),
B('x86-builder-dbg',
H(compile_step + std_host_tests, target_arch='x86')),
B('fyi-builder-rel', H(std_build_steps, experimental)),
B('fyi-tests', H(std_test_steps),
- T(std_tests, ['--experimental', flakiness_server])),
+ T(std_tests, ['--experimental', flakiness_server,
+ '--coverage-bucket', CHROMIUM_COVERAGE_BUCKET])),
B('fyi-component-builder-tests-dbg',
H(compile_step, extra_gyp='component=shared_library'),
T(std_tests, ['--experimental', flakiness_server])),
@@ -161,6 +165,10 @@ def GetBotStepMap():
B('webkit-latest-contentshell', H(compile_step),
T(['webkit_layout'], ['--auto-reconnect'])),
B('builder-unit-tests', H(compile_step), T(['unit'])),
+ B('webrtc-builder',
+ H(std_build_steps,
+ extra_args=['--build-targets=android_builder_webrtc'],
+ extra_gyp='include_tests=1 enable_tracing=1')),
B('webrtc-tests', H(std_test_steps), T(['webrtc'], [flakiness_server])),
# Generic builder config (for substring match).
@@ -180,7 +188,6 @@ def GetBotStepMap():
('try-tests', 'main-tests'),
('try-fyi-tests', 'fyi-tests'),
('webkit-latest-tests', 'main-tests'),
- ('webrtc-builder', 'main-builder-rel'),
]
for to_id, from_id in copy_map:
assert to_id not in bot_map
diff --git a/build/android/buildbot/bb_utils.py b/build/android/buildbot/bb_utils.py
index f16540bc69..4aa9b47ac7 100644
--- a/build/android/buildbot/bb_utils.py
+++ b/build/android/buildbot/bb_utils.py
@@ -26,6 +26,8 @@ CHROME_SRC = os.path.abspath(
GOMA_DIR = os.environ.get('GOMA_DIR', os.path.join(BB_BUILD_DIR, 'goma'))
+GSUTIL_PATH = os.path.join(BB_BUILD_DIR, 'third_party', 'gsutil', 'gsutil')
+
def CommandToString(command):
"""Returns quoted command that can be run in bash shell."""
return ' '.join(map(pipes.quote, command))
diff --git a/build/android/dex_action.gypi b/build/android/dex_action.gypi
index ac956b6e34..9b640d6c43 100644
--- a/build/android/dex_action.gypi
+++ b/build/android/dex_action.gypi
@@ -33,6 +33,7 @@
'input_paths': [],
'proguard_enabled%': 'false',
'proguard_enabled_input_path%': '',
+ 'dex_no_locals%': 0,
},
'inputs': [
'<(DEPTH)/build/android/gyp/util/build_utils.py',
@@ -51,6 +52,7 @@
'--configuration-name=<(CONFIGURATION_NAME)',
'--proguard-enabled=<(proguard_enabled)',
'--proguard-enabled-input-path=<(proguard_enabled_input_path)',
+ '--no-locals=<(dex_no_locals)',
# TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
'--ignore=>!(echo \'>(_inputs)\' | md5sum)',
diff --git a/build/android/envsetup.sh b/build/android/envsetup.sh
index f9e3e5ede1..cacfafc79c 100755
--- a/build/android/envsetup.sh
+++ b/build/android/envsetup.sh
@@ -143,6 +143,14 @@ fi
ANDROID_GOMA_WRAPPER=""
if [[ -d $GOMA_DIR ]]; then
ANDROID_GOMA_WRAPPER="$GOMA_DIR/gomacc"
+ num_cores="$(grep --count ^processor /proc/cpuinfo)"
+# Goma is IO-ish you want more threads than you have cores.
+ let goma_threads=num_cores*2
+ if [ -z "${GOMA_COMPILER_PROXY_THREADS}" -a "${goma_threads}" -gt 16 ]; then
+# The default is 16 threads, if the machine has many cores we crank it up a bit
+ GOMA_COMPILER_PROXY_THREADS="${goma_threads}"
+ export GOMA_COMPILER_PROXY_THREADS
+ fi
fi
export ANDROID_GOMA_WRAPPER
diff --git a/build/android/envsetup_functions.sh b/build/android/envsetup_functions.sh
index 9554bc9f29..fef07ef9cf 100755
--- a/build/android/envsetup_functions.sh
+++ b/build/android/envsetup_functions.sh
@@ -125,6 +125,7 @@ ${ANDROID_SDK_BUILD_TOOLS_VERSION}
;;
"mips")
DEFINES+=" target_arch=mipsel"
+ DEFINES+=" mips_arch_variant=mips32r1"
;;
*)
echo "TARGET_ARCH: ${TARGET_ARCH} is not supported." >& 2
@@ -218,23 +219,24 @@ process_options() {
# > make
################################################################################
sdk_build_init() {
- export ANDROID_SDK_VERSION=18
- export ANDROID_SDK_BUILD_TOOLS_VERSION=18.0.1
- # If ANDROID_NDK_ROOT is set when envsetup is run, use the ndk pointed to by
- # the environment variable. Otherwise, use the default ndk from the tree.
+ # Allow the caller to override a few environment variables. If any of them is
+ # unset, we default to a sane value that's known to work. This allows for
+ # experimentation with a custom SDK.
if [[ -z "${ANDROID_NDK_ROOT}" || ! -d "${ANDROID_NDK_ROOT}" ]]; then
export ANDROID_NDK_ROOT="${CHROME_SRC}/third_party/android_tools/ndk/"
fi
-
- # If ANDROID_SDK_ROOT is set when envsetup is run, and if it has the
- # right SDK-compatible directory layout, use the sdk pointed to by the
- # environment variable. Otherwise, use the default sdk from the tree.
+ if [[ -z "${ANDROID_SDK_VERSION}" ]]; then
+ export ANDROID_SDK_VERSION=18
+ fi
local sdk_suffix=platforms/android-${ANDROID_SDK_VERSION}
if [[ -z "${ANDROID_SDK_ROOT}" || \
! -d "${ANDROID_SDK_ROOT}/${sdk_suffix}" ]]; then
export ANDROID_SDK_ROOT="${CHROME_SRC}/third_party/android_tools/sdk/"
fi
+ if [[ -z "${ANDROID_SDK_BUILD_TOOLS_VERSION}" ]]; then
+ export ANDROID_SDK_BUILD_TOOLS_VERSION=18.0.1
+ fi
unset ANDROID_BUILD_TOP
diff --git a/build/android/findbugs_filter/findbugs_exclude.xml b/build/android/findbugs_filter/findbugs_exclude.xml
index 7b6860d0bb..2f7bde5709 100644
--- a/build/android/findbugs_filter/findbugs_exclude.xml
+++ b/build/android/findbugs_filter/findbugs_exclude.xml
@@ -21,6 +21,14 @@ In particular, ~ at the start of a string means it's a regex.
<Match>
<Class name="~org\.chromium\.content\..*\.NativeLibraries.*?" />
</Match>
+ <!--
+ Ignore bugs in CleanupReferenceTest.java (redundant null check)
+ TODO(joth): Group all GC related tests and filter them out, since the null
+ check is necessary to make sure the nullification is flushed to memory.
+ -->
+ <Match>
+ <Class name="~org\.chromium\.content\..*\.CleanupReferenceTest.*?" />
+ </Match>
<!-- Ignore errors in JavaBridge due to reflection. -->
<Match>
<Class name="~.*\.JavaBridge.*"/>
diff --git a/build/android/generate_emma_html.py b/build/android/generate_emma_html.py
new file mode 100755
index 0000000000..a226343e97
--- /dev/null
+++ b/build/android/generate_emma_html.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Aggregates EMMA coverage files to produce html output."""
+
+import fnmatch
+import json
+import optparse
+import os
+import sys
+import traceback
+
+from pylib import cmd_helper
+from pylib import constants
+
+
+def _GetFilesWithExt(root_dir, ext):
+ """Gets all files with a given extension.
+
+ Args:
+ root_dir: Directory in which to search for files.
+ ext: Extension to look for (including dot)
+
+ Returns:
+ A list of absolute paths to files that match.
+ """
+ files = []
+ for root, _, filenames in os.walk(root_dir):
+ basenames = fnmatch.filter(filenames, '*.' + ext)
+ files.extend([os.path.join(root, basename)
+ for basename in basenames])
+
+ return files
+
+
+def main(argv):
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('-o', '--output', help='HTML output filename.')
+ option_parser.add_option('-c', '--coverage-dir', default=None,
+ help=('Root of the directory in which to search for '
+ 'coverage data (.ec) files.'))
+ option_parser.add_option('-m', '--metadata-dir', default=None,
+ help=('Root of the directory in which to search for '
+ 'coverage metadata (.em) files.'))
+ options, args = option_parser.parse_args()
+
+ if not (options.coverage_dir and options.metadata_dir and options.output):
+ option_parser.error('All arguments are required.')
+
+ coverage_files = _GetFilesWithExt(options.coverage_dir, 'ec')
+ metadata_files = _GetFilesWithExt(options.metadata_dir, 'em')
+ print 'Found coverage files: %s' % str(coverage_files)
+ print 'Found metadata files: %s' % str(metadata_files)
+ sources_files = []
+ final_metadata_files = []
+ err = None
+ for f in metadata_files:
+ sources_file = os.path.splitext(f)[0] + '_sources.txt'
+ # TODO(gkanwar): Remove this once old coverage.em files have been cleaned
+ # from all bots.
+ # Warn if we have old metadata files lying around that don't correspond
+ # to a *_sources.txt (these should be manually cleaned).
+ try:
+ with open(sources_file, 'r') as sf:
+ sources_files.extend(json.load(sf))
+ except IOError as e:
+ traceback.print_exc()
+ err = e
+ else:
+ final_metadata_files.append(f)
+ sources_files = [os.path.join(constants.DIR_SOURCE_ROOT, s)
+ for s in sources_files]
+
+ input_args = []
+ for f in coverage_files + final_metadata_files:
+ input_args.append('-in')
+ input_args.append(f)
+
+ output_args = ['-Dreport.html.out.file', options.output]
+ source_args = ['-sp', ','.join(sources_files)]
+
+ exit_code = cmd_helper.RunCmd(
+ ['java', '-cp',
+ os.path.join(constants.ANDROID_SDK_ROOT, 'tools', 'lib', 'emma.jar'),
+ 'emma', 'report', '-r', 'html']
+ + input_args + output_args + source_args)
+
+ if exit_code > 0:
+ return exit_code
+ elif err:
+ return constants.WARNING_EXIT_CODE
+ else:
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/build/android/gyp/ant.py b/build/android/gyp/ant.py
index acf3dccdce..45ed3ed953 100755
--- a/build/android/gyp/ant.py
+++ b/build/android/gyp/ant.py
@@ -3,7 +3,8 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""An Ant wrapper that suppresses useless Ant output
+
+"""An Ant wrapper that suppresses useless Ant output.
Ant build scripts output "BUILD SUCCESSFUL" and build timing at the end of
every build. In the Android build, this just adds a lot of useless noise to the
@@ -27,4 +28,3 @@ def main(argv):
if __name__ == '__main__':
sys.exit(main(sys.argv))
-
diff --git a/build/android/gyp/dex.py b/build/android/gyp/dex.py
index 21e1183e87..f90de95290 100755
--- a/build/android/gyp/dex.py
+++ b/build/android/gyp/dex.py
@@ -4,7 +4,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import fnmatch
import optparse
import os
import sys
@@ -15,7 +14,11 @@ from util import md5_check
def DoDex(options, paths):
dx_binary = os.path.join(options.android_sdk_tools, 'dx')
- dex_cmd = [dx_binary, '--dex', '--output', options.dex_path] + paths
+ # See http://crbug.com/272064 for context on --force-jumbo.
+ dex_cmd = [dx_binary, '--dex', '--force-jumbo', '--output', options.dex_path]
+ if options.no_locals != '0':
+ dex_cmd.append('--no-locals')
+ dex_cmd += paths
record_path = '%s.md5.stamp' % options.dex_path
md5_check.CallAndRecordIfStale(
@@ -33,11 +36,14 @@ def main(argv):
help='Android sdk build tools directory.')
parser.add_option('--dex-path', help='Dex output path.')
parser.add_option('--configuration-name',
- help='The build CONFIGURATION_NAME.')
+ help='The build CONFIGURATION_NAME.')
parser.add_option('--proguard-enabled',
- help='"true" if proguard is enabled.')
+ help='"true" if proguard is enabled.')
parser.add_option('--proguard-enabled-input-path',
- help='Path to dex in Release mode when proguard is enabled.')
+ help=('Path to dex in Release mode when proguard '
+ 'is enabled.'))
+ parser.add_option('--no-locals',
+ help='Exclude locals list from the dex file.')
parser.add_option('--stamp', help='Path to touch on success.')
# TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
@@ -45,8 +51,8 @@ def main(argv):
options, paths = parser.parse_args()
- if (options.proguard_enabled == "true"
- and options.configuration_name == "Release"):
+ if (options.proguard_enabled == 'true'
+ and options.configuration_name == 'Release'):
paths = [options.proguard_enabled_input_path]
DoDex(options, paths)
@@ -57,4 +63,3 @@ def main(argv):
if __name__ == '__main__':
sys.exit(main(sys.argv))
-
diff --git a/build/android/gyp/emma_instr.py b/build/android/gyp/emma_instr.py
new file mode 100755
index 0000000000..8e69f395bc
--- /dev/null
+++ b/build/android/gyp/emma_instr.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Instruments classes and jar files.
+
+This script corresponds to the 'emma_instr' action in the java build process.
+Depending on whether emma_instrument is set, the 'emma_instr' action will either
+call one of the instrument commands, or the copy command.
+
+Possible commands are:
+- instrument_jar: Accepts a jar and instruments it using emma.jar.
+- instrument_classes: Accepts a directory containing java classes and instruments
+ it using emma.jar.
+- copy: Triggered instead of an instrumentation command when we don't have EMMA
+ coverage enabled. This allows us to make this a required step without
+ necessarily instrumenting on every build.
+"""
+
+import collections
+import json
+import os
+import shutil
+import sys
+import tempfile
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+from pylib.utils import command_option_parser
+
+from util import build_utils
+
+
+def _AddCommonOptions(option_parser):
+ """Adds common options to |option_parser|."""
+ option_parser.add_option('--input-path',
+ help=('Path to input file(s). Either the classes '
+ 'directory, or the path to a jar.'))
+ option_parser.add_option('--output-path',
+ help=('Path to output final file(s) to. Either the '
+ 'final classes directory, or the directory in '
+ 'which to place the instrumented/copied jar.'))
+ option_parser.add_option('--stamp', help='Path to touch when done.')
+
+
+def _AddInstrumentOptions(option_parser):
+ """Adds options related to instrumentation to |option_parser|."""
+ _AddCommonOptions(option_parser)
+ option_parser.add_option('--coverage-file',
+ help='File to create with coverage metadata.')
+ option_parser.add_option('--sources-file',
+ help='File to create with the list of sources.')
+ option_parser.add_option('--sources',
+ help='Space separated list of sources.')
+ option_parser.add_option('--src-root',
+ help='Root of the src repository.')
+ option_parser.add_option('--emma-jar',
+ help='Path to emma.jar.')
+
+
+def _RunCopyCommand(command, options, args, option_parser):
+ """Just copies the jar from input to output locations.
+
+ Args:
+ command: String indicating the command that was received to trigger
+ this function.
+ options: optparse options dictionary.
+ args: List of extra args from optparse.
+ option_parser: optparse.OptionParser object.
+
+ Returns:
+ An exit code.
+ """
+ if not (options.input_path and options.output_path):
+ option_parser.error('All arguments are required.')
+
+ if os.path.isdir(options.input_path):
+ shutil.rmtree(options.output_path, ignore_errors=True)
+ shutil.copytree(options.input_path, options.output_path)
+ else:
+ shutil.copy(options.input_path, options.output_path)
+
+ if options.stamp:
+ build_utils.Touch(options.stamp)
+
+
+def _CreateSourcesFile(sources_string, sources_file, src_root):
+ """Adds all normalized source directories to |sources_file|.
+
+ Args:
+ sources_string: String generated from gyp containing the list of sources.
+ sources_file: File into which to write the JSON list of sources.
+ src_root: Root which sources added to the file should be relative to.
+
+ Returns:
+ An exit code.
+ """
+ src_root = os.path.abspath(src_root)
+ sources = build_utils.ParseGypList(sources_string)
+ relative_sources = []
+ for s in sources:
+ abs_source = os.path.abspath(s)
+ if abs_source[:len(src_root)] != src_root:
+ print ('Error: found source directory not under repository root: %s %s'
+ % (abs_source, src_root))
+ return 1
+ rel_source = os.path.relpath(abs_source, src_root)
+
+ relative_sources.append(rel_source)
+
+ with open(sources_file, 'w') as f:
+ json.dump(relative_sources, f)
+
+
+def _RunInstrumentCommand(command, options, args, option_parser):
+ """Instruments the classes/jar files using EMMA.
+
+ Args:
+ command: 'instrument_jar' or 'instrument_classes'. This distinguishes
+ whether we copy the output from the created lib/ directory, or classes/
+ directory.
+ options: optparse options dictionary.
+ args: List of extra args from optparse.
+ option_parser: optparse.OptionParser object.
+
+ Returns:
+ An exit code.
+ """
+ if not (options.input_path and options.output_path and
+ options.coverage_file and options.sources_file and options.sources and
+ options.src_root and options.emma_jar):
+ option_parser.error('All arguments are required.')
+
+ coverage_file = os.path.join(os.path.dirname(options.output_path),
+ options.coverage_file)
+ sources_file = os.path.join(os.path.dirname(options.output_path),
+ options.sources_file)
+ temp_dir = tempfile.mkdtemp()
+ try:
+ # TODO(gkanwar): Add '-ix' option to filter out useless classes.
+ build_utils.CheckCallDie(['java', '-cp', options.emma_jar,
+ 'emma', 'instr',
+ '-ip', options.input_path,
+ '-d', temp_dir,
+ '-out', coverage_file,
+ '-m', 'fullcopy'], suppress_output=True)
+
+ if command == 'instrument_jar':
+ for jar in os.listdir(os.path.join(temp_dir, 'lib')):
+ shutil.copy(os.path.join(temp_dir, 'lib', jar),
+ options.output_path)
+ else: # 'instrument_classes'
+ if os.path.isdir(options.output_path):
+ shutil.rmtree(options.output_path, ignore_errors=True)
+ shutil.copytree(os.path.join(temp_dir, 'classes'),
+ options.output_path)
+ finally:
+ shutil.rmtree(temp_dir)
+
+ _CreateSourcesFile(options.sources, sources_file, options.src_root)
+
+ if options.stamp:
+ build_utils.Touch(options.stamp)
+
+ return 0
+
+
+CommandFunctionTuple = collections.namedtuple(
+ 'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
+VALID_COMMANDS = {
+ 'copy': CommandFunctionTuple(_AddCommonOptions,
+ _RunCopyCommand),
+ 'instrument_jar': CommandFunctionTuple(_AddInstrumentOptions,
+ _RunInstrumentCommand),
+ 'instrument_classes': CommandFunctionTuple(_AddInstrumentOptions,
+ _RunInstrumentCommand),
+}
+
+
+def main(argv):
+ option_parser = command_option_parser.CommandOptionParser(
+ commands_dict=VALID_COMMANDS)
+ command_option_parser.ParseAndExecute(option_parser)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/build/android/gyp/proguard.py b/build/android/gyp/proguard.py
new file mode 100755
index 0000000000..6268caff18
--- /dev/null
+++ b/build/android/gyp/proguard.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import optparse
+import os
+import sys
+
+from util import build_utils
+
+def DoProguard(options):
+ injars = options.input_path
+ outjars = options.output_path
+ classpath = build_utils.ParseGypList(options.classpath)
+ classpath = list(set(classpath))
+ libraryjars = ':'.join(classpath)
+ # proguard does its own dependency checking, which can be avoided by deleting
+ # the output.
+ if os.path.exists(options.output_path):
+ os.remove(options.output_path)
+ proguard_cmd = [options.proguard_path,
+ '-injars', injars,
+ '-outjars', outjars,
+ '-libraryjars', libraryjars,
+ '@' + options.proguard_config]
+ build_utils.CheckCallDie(proguard_cmd)
+
+def main(argv):
+ parser = optparse.OptionParser()
+ parser.add_option('--proguard-path',
+ help='Path to the proguard executable.')
+ parser.add_option('--input-path',
+ help='Path to the .jar file proguard should run on.')
+ parser.add_option('--output-path', help='Path to the generated .jar file.')
+ parser.add_option('--proguard-config',
+ help='Path to the proguard configuration file.')
+ parser.add_option('--classpath', help="Classpath for proguard.")
+ parser.add_option('--stamp', help='Path to touch on success.')
+
+ # TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
+ parser.add_option('--ignore', help='Ignored.')
+
+ options, _ = parser.parse_args()
+
+ DoProguard(options)
+
+ if options.stamp:
+ build_utils.Touch(options.stamp)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/build/android/instr_action.gypi b/build/android/instr_action.gypi
new file mode 100644
index 0000000000..b4164f651c
--- /dev/null
+++ b/build/android/instr_action.gypi
@@ -0,0 +1,51 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into an action to provide a rule that
+# instruments either java class files, or jars.
+
+{
+ 'variables': {
+ 'instr_type%': 'jar',
+ 'input_path%': '',
+ 'output_path%': '',
+ 'stamp_path%': '',
+ 'extra_instr_args': [],
+ 'emma_jar': '<(android_sdk_root)/tools/lib/emma.jar',
+ 'conditions': [
+ ['emma_instrument != 0', {
+ 'extra_instr_args': [
+ '--coverage-file=<(_target_name).em',
+ '--sources-file=<(_target_name)_sources.txt',
+ '--sources=<(java_in_dir)/src >(additional_src_dirs) >(generated_src_dirs)',
+ '--src-root=<(DEPTH)',
+ '--emma-jar=<(emma_jar)',
+ ],
+ 'conditions': [
+ ['instr_type == "jar"', {
+ 'instr_action': 'instrument_jar',
+ }, {
+ 'instr_action': 'instrument_classes',
+ }]
+ ],
+ }, {
+ 'instr_action': 'copy',
+ 'extra_instr_args': [],
+ }]
+ ]
+ },
+ 'inputs': [
+ '<(DEPTH)/build/android/gyp/emma_instr.py',
+ '<(DEPTH)/build/android/gyp/util/build_utils.py',
+ '<(DEPTH)/build/android/pylib/utils/command_option_parser.py',
+ ],
+ 'action': [
+ 'python', '<(DEPTH)/build/android/gyp/emma_instr.py',
+ '<(instr_action)',
+ '--input-path=<(input_path)',
+ '--output-path=<(output_path)',
+ '--stamp=<(stamp_path)',
+ '<@(extra_instr_args)',
+ ]
+}
diff --git a/build/android/pylib/android_commands.py b/build/android/pylib/android_commands.py
index f8c074758d..c407ffd692 100644
--- a/build/android/pylib/android_commands.py
+++ b/build/android/pylib/android_commands.py
@@ -459,18 +459,41 @@ class AndroidCommands(object):
def RestartAdbServer(self):
"""Restart the adb server."""
- self.KillAdbServer()
- self.StartAdbServer()
+ ret = self.KillAdbServer()
+ if ret != 0:
+ raise errors.MsgException('KillAdbServer: %d' % ret)
+
+ ret = self.StartAdbServer()
+ if ret != 0:
+ raise errors.MsgException('StartAdbServer: %d' % ret)
def KillAdbServer(self):
"""Kill adb server."""
adb_cmd = [constants.ADB_PATH, 'kill-server']
- return cmd_helper.RunCmd(adb_cmd)
+ ret = cmd_helper.RunCmd(adb_cmd)
+ retry = 0
+ while retry < 3:
+ ret = cmd_helper.RunCmd(['pgrep', 'adb'])
+ if ret != 0:
+ # pgrep didn't find adb, kill-server succeeded.
+ return 0
+ retry += 1
+ time.sleep(retry)
+ return ret
def StartAdbServer(self):
"""Start adb server."""
- adb_cmd = [constants.ADB_PATH, 'start-server']
- return cmd_helper.RunCmd(adb_cmd)
+ adb_cmd = ['taskset', '-c', '0', constants.ADB_PATH, 'start-server']
+ ret = cmd_helper.RunCmd(adb_cmd)
+ retry = 0
+ while retry < 3:
+ ret = cmd_helper.RunCmd(['pgrep', 'adb'])
+ if ret == 0:
+ # pgrep found adb, start-server succeeded.
+ return 0
+ retry += 1
+ time.sleep(retry)
+ return ret
def WaitForSystemBootCompleted(self, wait_time):
"""Waits for targeted system's boot_completed flag to be set.
@@ -612,7 +635,8 @@ class AndroidCommands(object):
return processes_killed
def _GetActivityCommand(self, package, activity, wait_for_completion, action,
- category, data, extras, trace_file_name, force_stop):
+ category, data, extras, trace_file_name, force_stop,
+ flags):
"""Creates command to start |package|'s activity on the device.
Args - as for StartActivity
@@ -646,13 +670,15 @@ class AndroidCommands(object):
cmd += ' %s %s' % (key, value)
if trace_file_name:
cmd += ' --start-profiler ' + trace_file_name
+ if flags:
+ cmd += ' -f %s' % flags
return cmd
def StartActivity(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
- force_stop=False):
+ force_stop=False, flags=None):
"""Starts |package|'s activity on the device.
Args:
@@ -670,14 +696,14 @@ class AndroidCommands(object):
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
- trace_file_name, force_stop)
+ trace_file_name, force_stop, flags)
self.RunShellCommand(cmd)
def StartActivityTimed(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
- force_stop=False):
+ force_stop=False, flags=None):
"""Starts |package|'s activity on the device, returning the start time
Args - as for StartActivity
@@ -687,7 +713,7 @@ class AndroidCommands(object):
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
- trace_file_name, force_stop)
+ trace_file_name, force_stop, flags)
self.StartMonitoringLogcat()
self.RunShellCommand('log starting activity; ' + cmd)
activity_started_re = re.compile('.*starting activity.*')
diff --git a/build/android/pylib/base/base_test_runner.py b/build/android/pylib/base/base_test_runner.py
index 9173b729f5..bd8fefeea7 100644
--- a/build/android/pylib/base/base_test_runner.py
+++ b/build/android/pylib/base/base_test_runner.py
@@ -30,13 +30,11 @@ NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
class BaseTestRunner(object):
"""Base class for running tests on a single device."""
- def __init__(self, device, tool, build_type, push_deps=True,
- cleanup_test_files=False):
+ def __init__(self, device, tool, push_deps=True, cleanup_test_files=False):
"""
Args:
device: Tests will run on the device of this ID.
tool: Name of the Valgrind tool.
- build_type: 'Release' or 'Debug'.
push_deps: If True, push all dependencies to the device.
cleanup_test_files: Whether or not to cleanup test files on device.
"""
@@ -55,7 +53,6 @@ class BaseTestRunner(object):
# starting it in TestServerThread.
self.test_server_spawner_port = 0
self.test_server_port = 0
- self.build_type = build_type
self._push_deps = push_deps
self._cleanup_test_files = cleanup_test_files
@@ -130,7 +127,7 @@ class BaseTestRunner(object):
def _ForwardPorts(self, port_pairs):
"""Forwards a port."""
- Forwarder.Map(port_pairs, self.adb, self.build_type, self.tool)
+ Forwarder.Map(port_pairs, self.adb, constants.GetBuildType(), self.tool)
def _UnmapPorts(self, port_pairs):
"""Unmap previously forwarded ports."""
@@ -194,8 +191,7 @@ class BaseTestRunner(object):
[(self.test_server_spawner_port, self.test_server_spawner_port)])
self._spawning_server = SpawningServer(self.test_server_spawner_port,
self.adb,
- self.tool,
- self.build_type)
+ self.tool)
self._spawning_server.Start()
server_ready, error_msg = ports.IsHttpServerConnectable(
'127.0.0.1', self.test_server_spawner_port, path='/ping',
diff --git a/build/android/pylib/base/test_dispatcher.py b/build/android/pylib/base/test_dispatcher.py
index a8363c26f7..31fa0784dc 100644
--- a/build/android/pylib/base/test_dispatcher.py
+++ b/build/android/pylib/base/test_dispatcher.py
@@ -4,16 +4,15 @@
"""Dispatches tests, either sharding or replicating them.
-To dispatch, performs the following steps:
+Performs the following steps:
* Create a test collection factory, using the given tests
- If sharding: test collection factory returns the same shared test collection
to all test runners
- If replicating: test collection factory returns a unique test collection to
each test runner, with the same set of tests in each.
-* Get the list of devices to run on
-* Create test runners
-* Run each test runner in its own thread, pulling tests from the test collection
- generated from the test collection factory until there are no tests left.
+* Create a test runner for each device.
+* Run each test runner in its own thread, grabbing tests from the test
+ collection until there are no tests left.
"""
import logging
@@ -304,40 +303,8 @@ def _TearDownRunners(runners, timeout=None):
threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
-
-def _GetAttachedDevices(wait_for_debugger=False, test_device=None):
- """Get all attached devices.
-
- If we are using a debugger, limit to only one device.
-
- Args:
- wait_for_debugger: True if this run will use a debugger.
- test_device: Name of a specific device to use.
-
- Returns:
- A list of attached devices.
- """
- attached_devices = []
-
- attached_devices = android_commands.GetAttachedDevices()
- if test_device:
- assert test_device in attached_devices, (
- 'Did not find device %s among attached device. Attached devices: %s'
- % (test_device, ', '.join(attached_devices)))
- attached_devices = [test_device]
-
- if len(attached_devices) > 1 and wait_for_debugger:
- logging.warning('Debugger can not be sharded, using first available device')
- attached_devices = attached_devices[:1]
-
- return attached_devices
-
-
-def RunTests(tests, runner_factory, wait_for_debugger, test_device,
- shard=True,
- build_type='Debug',
- test_timeout=DEFAULT_TIMEOUT,
- setup_timeout=DEFAULT_TIMEOUT,
+def RunTests(tests, runner_factory, devices, shard=True,
+ test_timeout=DEFAULT_TIMEOUT, setup_timeout=DEFAULT_TIMEOUT,
num_retries=2):
"""Run all tests on attached devices, retrying tests that don't pass.
@@ -345,14 +312,12 @@ def RunTests(tests, runner_factory, wait_for_debugger, test_device,
tests: List of tests to run.
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
- wait_for_debugger: True if this test is using a debugger.
- test_device: A specific device to run tests on, or None.
+ devices: List of attached devices.
shard: True if we should shard, False if we should replicate tests.
- Sharding tests will distribute tests across all test runners through a
shared test collection.
- Replicating tests will copy all tests to each test runner through a
unique test collection for each test runner.
- build_type: Either 'Debug' or 'Release'.
test_timeout: Watchdog timeout in seconds for running tests.
setup_timeout: Watchdog timeout in seconds for creating and cleaning up
test runners.
@@ -362,7 +327,7 @@ def RunTests(tests, runner_factory, wait_for_debugger, test_device,
A tuple of (base_test_result.TestRunResults object, exit code).
"""
if not tests:
- logging.error('No tests to run.')
+ logging.critical('No tests to run.')
return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)
if shard:
@@ -379,8 +344,6 @@ def RunTests(tests, runner_factory, wait_for_debugger, test_device,
tag_results_with_device = True
log_string = 'replicated on each device'
- devices = _GetAttachedDevices(wait_for_debugger, test_device)
-
logging.info('Will run %d tests (%s): %s', len(tests), log_string, str(tests))
runners = _CreateRunners(runner_factory, devices, setup_timeout)
try:
diff --git a/build/android/pylib/base/test_dispatcher_unittest.py b/build/android/pylib/base/test_dispatcher_unittest.py
index 253826c03f..88dfda69f0 100644
--- a/build/android/pylib/base/test_dispatcher_unittest.py
+++ b/build/android/pylib/base/test_dispatcher_unittest.py
@@ -167,7 +167,7 @@ class TestShard(unittest.TestCase):
@staticmethod
def _RunShard(runner_factory):
return test_dispatcher.RunTests(
- ['a', 'b', 'c'], runner_factory, False, None, shard=True)
+ ['a', 'b', 'c'], runner_factory, ['0', '1'], shard=True)
def testShard(self):
results, exit_code = TestShard._RunShard(MockRunner)
@@ -182,7 +182,7 @@ class TestShard(unittest.TestCase):
def testNoTests(self):
results, exit_code = test_dispatcher.RunTests(
- [], MockRunner, False, None, shard=True)
+ [], MockRunner, ['0', '1'], shard=True)
self.assertEqual(len(results.GetAll()), 0)
self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
@@ -192,7 +192,7 @@ class TestReplicate(unittest.TestCase):
@staticmethod
def _RunReplicate(runner_factory):
return test_dispatcher.RunTests(
- ['a', 'b', 'c'], runner_factory, False, None, shard=False)
+ ['a', 'b', 'c'], runner_factory, ['0', '1'], shard=False)
def testReplicate(self):
results, exit_code = TestReplicate._RunReplicate(MockRunner)
@@ -208,7 +208,7 @@ class TestReplicate(unittest.TestCase):
def testNoTests(self):
results, exit_code = test_dispatcher.RunTests(
- [], MockRunner, False, None, shard=False)
+ [], MockRunner, ['0', '1'], shard=False)
self.assertEqual(len(results.GetAll()), 0)
self.assertEqual(exit_code, constants.ERROR_EXIT_CODE)
diff --git a/build/android/pylib/chrome_test_server_spawner.py b/build/android/pylib/chrome_test_server_spawner.py
index 0ff7997359..b15f2917b1 100644
--- a/build/android/pylib/chrome_test_server_spawner.py
+++ b/build/android/pylib/chrome_test_server_spawner.py
@@ -100,7 +100,7 @@ def _GetServerTypeCommandLine(server_type):
class TestServerThread(threading.Thread):
"""A thread to run the test server in a separate process."""
- def __init__(self, ready_event, arguments, adb, tool, build_type):
+ def __init__(self, ready_event, arguments, adb, tool):
"""Initialize TestServerThread with the following argument.
Args:
@@ -108,7 +108,6 @@ class TestServerThread(threading.Thread):
arguments: dictionary of arguments to run the test server.
adb: instance of AndroidCommands.
tool: instance of runtime error detection tool.
- build_type: 'Release' or 'Debug'.
"""
threading.Thread.__init__(self)
self.wait_event = threading.Event()
@@ -128,7 +127,6 @@ class TestServerThread(threading.Thread):
self.pipe_in = None
self.pipe_out = None
self.command_line = []
- self.build_type = build_type
def _WaitToStartAndGetPortFromTestServer(self):
"""Waits for the Python test server to start and gets the port it is using.
@@ -251,7 +249,8 @@ class TestServerThread(threading.Thread):
else:
self.is_ready = _CheckPortStatus(self.host_port, True)
if self.is_ready:
- Forwarder.Map([(0, self.host_port)], self.adb, self.build_type, self.tool)
+ Forwarder.Map([(0, self.host_port)], self.adb, constants.GetBuildType(),
+ self.tool)
# Check whether the forwarder is ready on the device.
self.is_ready = False
device_port = Forwarder.DevicePortForHostPort(self.host_port)
@@ -333,8 +332,7 @@ class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
ready_event,
json.loads(test_server_argument_json),
self.server.adb,
- self.server.tool,
- self.server.build_type)
+ self.server.tool)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
@@ -401,14 +399,14 @@ class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
class SpawningServer(object):
"""The class used to start/stop a http server."""
- def __init__(self, test_server_spawner_port, adb, tool, build_type):
+ def __init__(self, test_server_spawner_port, adb, tool):
logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.server.adb = adb
self.server.tool = tool
self.server.test_server_instance = None
- self.server.build_type = build_type
+ self.server.build_type = constants.GetBuildType()
def _Listen(self):
logging.info('Starting test server spawner')
diff --git a/build/android/pylib/constants.py b/build/android/pylib/constants.py
index 6cbe727c93..59ffcd421e 100644
--- a/build/android/pylib/constants.py
+++ b/build/android/pylib/constants.py
@@ -14,11 +14,13 @@ DIR_SOURCE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
ISOLATE_DEPS_DIR = os.path.join(DIR_SOURCE_ROOT, 'isolate_deps_dir')
EMULATOR_SDK_ROOT = os.path.abspath(os.path.join(DIR_SOURCE_ROOT, os.pardir,
os.pardir))
-
CHROME_PACKAGE = 'com.google.android.apps.chrome'
CHROME_ACTIVITY = 'com.google.android.apps.chrome.Main'
CHROME_DEVTOOLS_SOCKET = 'chrome_devtools_remote'
+CHROME_STABLE_PACKAGE = 'com.android.chrome'
+CHROME_BETA_PACKAGE = 'com.chrome.beta'
+
CHROME_TESTS_PACKAGE = 'com.google.android.apps.chrome.tests'
LEGACY_BROWSER_PACKAGE = 'com.google.android.browser'
@@ -52,6 +54,7 @@ LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
+TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
# The net test server is started from port 10201.
# TODO(pliard): http://crbug.com/239014. Remove this dirty workaround once
@@ -83,6 +86,17 @@ ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
+def GetBuildType():
+ try:
+ return os.environ['BUILDTYPE']
+ except KeyError:
+ raise Exception('The BUILDTYPE environment variable has not been set')
+
+
+def SetBuildType(build_type):
+ os.environ['BUILDTYPE'] = build_type
+
+
def _GetADBPath():
if os.environ.get('ANDROID_SDK_ROOT'):
return 'adb'
diff --git a/build/android/pylib/device_stats_monitor.py b/build/android/pylib/device_stats_monitor.py
index 000632b122..482965f72e 100644
--- a/build/android/pylib/device_stats_monitor.py
+++ b/build/android/pylib/device_stats_monitor.py
@@ -29,10 +29,11 @@ class DeviceStatsMonitor(object):
RESULT_VIEWER_PATH = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'device_stats_monitor.html'))
- def __init__(self, adb, hz, build_type):
+ def __init__(self, adb, hz):
self._adb = adb
host_path = os.path.abspath(os.path.join(
- constants.DIR_SOURCE_ROOT, 'out', build_type, 'device_stats_monitor'))
+ constants.DIR_SOURCE_ROOT, 'out', constants.GetBuildType(),
+ 'device_stats_monitor'))
self._adb.PushIfNeeded(host_path, DeviceStatsMonitor.DEVICE_PATH)
self._hz = hz
diff --git a/build/android/pylib/fake_dns.py b/build/android/pylib/fake_dns.py
index b119d7600c..ed99d20fb3 100644
--- a/build/android/pylib/fake_dns.py
+++ b/build/android/pylib/fake_dns.py
@@ -14,14 +14,12 @@ class FakeDns(object):
"""Wrapper class for the fake_dns tool."""
_FAKE_DNS_PATH = constants.TEST_EXECUTABLE_DIR + '/fake_dns'
- def __init__(self, adb, build_type):
+ def __init__(self, adb):
"""
Args:
adb: the AndroidCommands to use.
- build_type: 'Release' or 'Debug'.
"""
self._adb = adb
- self._build_type = build_type
self._fake_dns = None
self._original_dns = None
@@ -32,7 +30,7 @@ class FakeDns(object):
subprocess instance connected to the fake_dns process on the device.
"""
self._adb.PushIfNeeded(
- os.path.join(constants.DIR_SOURCE_ROOT, 'out', self._build_type,
+ os.path.join(constants.DIR_SOURCE_ROOT, 'out', constants.GetBuildType(),
'fake_dns'),
FakeDns._FAKE_DNS_PATH)
return subprocess.Popen(
diff --git a/build/android/pylib/gtest/gtest_config.py b/build/android/pylib/gtest/gtest_config.py
index f77d8580c8..64cdbaf951 100644
--- a/build/android/pylib/gtest/gtest_config.py
+++ b/build/android/pylib/gtest/gtest_config.py
@@ -12,6 +12,7 @@ EXPERIMENTAL_TEST_SUITES = [
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
+ 'content_browsertests',
'android_webview_unittests',
'base_unittests',
'cc_unittests',
@@ -29,7 +30,6 @@ STABLE_TEST_SUITES = [
'webkit_unit_tests',
'breakpad_unittests',
'sandbox_linux_unittests',
- 'content_browsertests',
]
WEBRTC_TEST_SUITES = [
@@ -37,7 +37,7 @@ WEBRTC_TEST_SUITES = [
'common_audio_unittests',
'common_video_unittests',
'metrics_unittests',
- 'modules_integrationtests',
+ 'modules_tests',
'modules_unittests',
'neteq_unittests',
'system_wrappers_unittests',
diff --git a/build/android/pylib/gtest/setup.py b/build/android/pylib/gtest/setup.py
index 2e70dd07f3..f139c97908 100644
--- a/build/android/pylib/gtest/setup.py
+++ b/build/android/pylib/gtest/setup.py
@@ -15,9 +15,7 @@ from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import ports
-from pylib.base import base_test_result
-import gtest_config
import test_package_apk
import test_package_exe
import test_runner
@@ -69,14 +67,14 @@ _ISOLATE_SCRIPT = os.path.join(
constants.DIR_SOURCE_ROOT, 'tools', 'swarm_client', 'isolate.py')
-def _GenerateDepsDirUsingIsolate(suite_name, build_type):
+def _GenerateDepsDirUsingIsolate(suite_name):
"""Generate the dependency dir for the test suite using isolate.
Args:
suite_name: Name of the test suite (e.g. base_unittests).
- build_type: Release/Debug
"""
- product_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type)
+ product_dir = os.path.join(cmd_helper.OutDirectory.get(),
+ constants.GetBuildType())
assert os.path.isabs(product_dir)
if os.path.isdir(constants.ISOLATE_DEPS_DIR):
@@ -142,7 +140,8 @@ def _GenerateDepsDirUsingIsolate(suite_name, build_type):
shutil.move(os.path.join(root, filename), paks_dir)
# Move everything in PRODUCT_DIR to top level.
- deps_product_dir = os.path.join(constants.ISOLATE_DEPS_DIR, 'out', build_type)
+ deps_product_dir = os.path.join(constants.ISOLATE_DEPS_DIR, 'out',
+ constants.GetBuildType())
if os.path.isdir(deps_product_dir):
for p in os.listdir(deps_product_dir):
shutil.move(os.path.join(deps_product_dir, p), constants.ISOLATE_DEPS_DIR)
@@ -257,11 +256,12 @@ def _GetTestsFiltered(suite_name, gtest_filter, runner_factory, devices):
return tests
-def Setup(test_options):
+def Setup(test_options, devices):
"""Create the test runner factory and tests.
Args:
test_options: A GTestOptions object.
+ devices: A list of attached devices.
Returns:
A tuple of (TestRunnerFactory, tests).
@@ -270,19 +270,17 @@ def Setup(test_options):
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
- test_package = test_package_apk.TestPackageApk(test_options.suite_name,
- test_options.build_type)
+ test_package = test_package_apk.TestPackageApk(test_options.suite_name)
if not os.path.exists(test_package.suite_path):
test_package = test_package_exe.TestPackageExecutable(
- test_options.suite_name, test_options.build_type)
+ test_options.suite_name)
if not os.path.exists(test_package.suite_path):
raise Exception(
'Did not find %s target. Ensure it has been built.'
% test_options.suite_name)
logging.warning('Found target %s', test_package.suite_path)
- _GenerateDepsDirUsingIsolate(test_options.suite_name,
- test_options.build_type)
+ _GenerateDepsDirUsingIsolate(test_options.suite_name)
# Constructs a new TestRunner with the current options.
def TestRunnerFactory(device, shard_index):
@@ -291,12 +289,11 @@ def Setup(test_options):
device,
test_package)
- attached_devices = android_commands.GetAttachedDevices()
tests = _GetTestsFiltered(test_options.suite_name, test_options.gtest_filter,
- TestRunnerFactory, attached_devices)
+ TestRunnerFactory, devices)
# Coalesce unit tests into a single test per device
if test_options.suite_name != 'content_browsertests':
- num_devices = len(attached_devices)
+ num_devices = len(devices)
tests = [':'.join(tests[i::num_devices]) for i in xrange(num_devices)]
tests = [t for t in tests if t]
diff --git a/build/android/pylib/gtest/test_options.py b/build/android/pylib/gtest/test_options.py
index c4146713f2..4a9ce12f1d 100644
--- a/build/android/pylib/gtest/test_options.py
+++ b/build/android/pylib/gtest/test_options.py
@@ -7,7 +7,6 @@
import collections
GTestOptions = collections.namedtuple('GTestOptions', [
- 'build_type',
'tool',
'cleanup_test_files',
'push_deps',
diff --git a/build/android/pylib/gtest/test_package_apk.py b/build/android/pylib/gtest/test_package_apk.py
index 7fb55f63a4..5944147bdc 100644
--- a/build/android/pylib/gtest/test_package_apk.py
+++ b/build/android/pylib/gtest/test_package_apk.py
@@ -23,14 +23,14 @@ from test_package import TestPackage
class TestPackageApk(TestPackage):
"""A helper class for running APK-based native tests."""
- def __init__(self, suite_name, build_type):
+ def __init__(self, suite_name):
"""
Args:
suite_name: Name of the test suite (e.g. base_unittests).
- build_type: 'Release' or 'Debug'.
"""
TestPackage.__init__(self, suite_name)
- product_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type)
+ product_dir = os.path.join(cmd_helper.OutDirectory.get(),
+ constants.GetBuildType())
if suite_name == 'content_browsertests':
self.suite_path = os.path.join(
product_dir, 'apks', '%s.apk' % suite_name)
diff --git a/build/android/pylib/gtest/test_package_exe.py b/build/android/pylib/gtest/test_package_exe.py
index f0238b81aa..7aa39d8329 100644
--- a/build/android/pylib/gtest/test_package_exe.py
+++ b/build/android/pylib/gtest/test_package_exe.py
@@ -22,14 +22,14 @@ class TestPackageExecutable(TestPackage):
_TEST_RUNNER_RET_VAL_FILE = 'gtest_retval'
- def __init__(self, suite_name, build_type):
+ def __init__(self, suite_name):
"""
Args:
suite_name: Name of the test suite (e.g. base_unittests).
- build_type: 'Release' or 'Debug'.
"""
TestPackage.__init__(self, suite_name)
- product_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type)
+ product_dir = os.path.join(cmd_helper.OutDirectory.get(),
+ constants.GetBuildType())
self.suite_path = os.path.join(product_dir, suite_name)
self._symbols_dir = os.path.join(product_dir, 'lib.target')
diff --git a/build/android/pylib/gtest/test_runner.py b/build/android/pylib/gtest/test_runner.py
index 8db2f83450..7312c3694c 100644
--- a/build/android/pylib/gtest/test_runner.py
+++ b/build/android/pylib/gtest/test_runner.py
@@ -33,7 +33,6 @@ class TestRunner(base_test_runner.BaseTestRunner):
"""
super(TestRunner, self).__init__(device, test_options.tool,
- test_options.build_type,
test_options.push_deps,
test_options.cleanup_test_files)
diff --git a/build/android/pylib/host_driven/setup.py b/build/android/pylib/host_driven/setup.py
index ae7860a8a0..a087bf1484 100644
--- a/build/android/pylib/host_driven/setup.py
+++ b/build/android/pylib/host_driven/setup.py
@@ -196,7 +196,6 @@ def InstrumentationSetup(host_driven_test_root, official_build,
return test_runner.HostDrivenTestRunner(
device, shard_index,
instrumentation_options.tool,
- instrumentation_options.build_type,
instrumentation_options.push_deps,
instrumentation_options.cleanup_test_files)
diff --git a/build/android/pylib/host_driven/test_case.py b/build/android/pylib/host_driven/test_case.py
index 2bf5f0a31c..3387c948bd 100644
--- a/build/android/pylib/host_driven/test_case.py
+++ b/build/android/pylib/host_driven/test_case.py
@@ -15,8 +15,9 @@ which the test method will run against. The test runner runs the test method
itself, collecting the result, and calls TearDown.
Tests can perform arbitrary Python commands and asserts in test methods. Tests
-that run instrumentation tests can make use of the _RunJavaTests helper function
-to trigger Java tests and convert results into a single host-driven test result.
+that run instrumentation tests can make use of the _RunJavaTestFilters helper
+function to trigger Java tests and convert results into a single host-driven
+test result.
"""
import logging
@@ -24,6 +25,7 @@ import os
import time
from pylib import android_commands
+from pylib import constants
from pylib.base import base_test_result
from pylib.instrumentation import test_package
from pylib.instrumentation import test_result
@@ -55,11 +57,10 @@ class HostDrivenTestCase(object):
self.instrumentation_options = instrumentation_options
self.ports_to_forward = []
- def SetUp(self, device, shard_index, build_type, push_deps,
+ def SetUp(self, device, shard_index, push_deps,
cleanup_test_files):
self.device_id = device
self.shard_index = shard_index
- self.build_type = build_type
self.adb = android_commands.AndroidCommands(self.device_id)
self.push_deps = push_deps
self.cleanup_test_files = cleanup_test_files
@@ -69,29 +70,23 @@ class HostDrivenTestCase(object):
def GetOutDir(self):
return os.path.join(os.environ['CHROME_SRC'], 'out',
- self.build_type)
+ constants.GetBuildType())
def Run(self):
logging.info('Running host-driven test: %s', self.tagged_name)
# Get the test method on the derived class and execute it
return getattr(self, self.test_name)()
- def __RunJavaTest(self, package_name, test_case, test_method):
- """Runs a single Java test method with a Java TestRunner.
+ def __RunJavaTest(self, test, test_pkg):
+ """Runs a single Java test in a Java TestRunner.
Args:
- package_name: Package name in which the java tests live
- (e.g. foo.bar.baz.tests)
- test_case: Name of the Java test case (e.g. FooTest)
- test_method: Name of the test method to run (e.g. testFooBar)
+ test: Fully qualified test name (ex. foo.bar.TestClass#testMethod)
+ test_pkg: TestPackage object.
Returns:
TestRunResults object with a single test result.
"""
- test = '%s.%s#%s' % (package_name, test_case, test_method)
- test_pkg = test_package.TestPackage(
- self.instrumentation_options.test_apk_path,
- self.instrumentation_options.test_apk_jar_path)
java_test_runner = test_runner.TestRunner(self.instrumentation_options,
self.device_id,
self.shard_index, test_pkg,
@@ -102,7 +97,7 @@ class HostDrivenTestCase(object):
finally:
java_test_runner.TearDown()
- def _RunJavaTests(self, package_name, tests):
+ def _RunJavaTestFilters(self, test_filters):
"""Calls a list of tests and stops at the first test failure.
This method iterates until either it encounters a non-passing test or it
@@ -113,9 +108,7 @@ class HostDrivenTestCase(object):
being defined.
Args:
- package_name: Package name in which the java tests live
- (e.g. foo.bar.baz.tests)
- tests: A list of Java test names which will be run
+ test_filters: A list of Java test filters.
Returns:
A TestRunResults object containing an overall result for this set of Java
@@ -124,17 +117,30 @@ class HostDrivenTestCase(object):
test_type = base_test_result.ResultType.PASS
log = ''
+ test_pkg = test_package.TestPackage(
+ self.instrumentation_options.test_apk_path,
+ self.instrumentation_options.test_apk_jar_path)
+
start_ms = int(time.time()) * 1000
- for test in tests:
- # We're only running one test at a time, so this TestRunResults object
- # will hold only one result.
- suite, test_name = test.split('.')
- java_result = self.__RunJavaTest(package_name, suite, test_name)
- assert len(java_result.GetAll()) == 1
- if not java_result.DidRunPass():
- result = java_result.GetNotPass().pop()
- log = result.GetLog()
- test_type = result.GetType()
+ done = False
+ for test_filter in test_filters:
+ tests = test_pkg._GetAllMatchingTests(None, None, test_filter)
+ # Filters should always result in >= 1 test.
+ if len(tests) == 0:
+ raise Exception('Java test filter "%s" returned no tests.'
+ % test_filter)
+ for test in tests:
+ # We're only running one test at a time, so this TestRunResults object
+ # will hold only one result.
+ java_result = self.__RunJavaTest(test, test_pkg)
+ assert len(java_result.GetAll()) == 1
+ if not java_result.DidRunPass():
+ result = java_result.GetNotPass().pop()
+ log = result.GetLog()
+ test_type = result.GetType()
+ done = True
+ break
+ if done:
break
duration_ms = int(time.time()) * 1000 - start_ms
diff --git a/build/android/pylib/host_driven/test_runner.py b/build/android/pylib/host_driven/test_runner.py
index 9a9acdd53a..53fd70f3da 100644
--- a/build/android/pylib/host_driven/test_runner.py
+++ b/build/android/pylib/host_driven/test_runner.py
@@ -49,7 +49,7 @@ class HostDrivenTestRunner(base_test_runner.BaseTestRunner):
"""
#override
- def __init__(self, device, shard_index, tool, build_type, push_deps,
+ def __init__(self, device, shard_index, tool, push_deps,
cleanup_test_files):
"""Creates a new HostDrivenTestRunner.
@@ -57,13 +57,12 @@ class HostDrivenTestRunner(base_test_runner.BaseTestRunner):
device: Attached android device.
shard_index: Shard index.
tool: Name of the Valgrind tool.
- build_type: 'Release' or 'Debug'.
push_deps: If True, push all dependencies to the device.
cleanup_test_files: Whether or not to cleanup test files on device.
"""
- super(HostDrivenTestRunner, self).__init__(device, tool, build_type,
- push_deps, cleanup_test_files)
+ super(HostDrivenTestRunner, self).__init__(device, tool, push_deps,
+ cleanup_test_files)
# The shard index affords the ability to create unique port numbers (e.g.
# DEFAULT_PORT + shard_index) if the test so wishes.
@@ -87,8 +86,8 @@ class HostDrivenTestRunner(base_test_runner.BaseTestRunner):
exception_raised = False
try:
- test.SetUp(self.device, self.shard_index, self.build_type,
- self._push_deps, self._cleanup_test_files)
+ test.SetUp(self.device, self.shard_index, self._push_deps,
+ self._cleanup_test_files)
except Exception:
logging.exception(
'Caught exception while trying to run SetUp() for test: ' +
diff --git a/build/android/pylib/host_driven/tests_annotations.py b/build/android/pylib/host_driven/tests_annotations.py
index 2654e325f1..d5f557de8e 100644
--- a/build/android/pylib/host_driven/tests_annotations.py
+++ b/build/android/pylib/host_driven/tests_annotations.py
@@ -88,6 +88,6 @@ def DisabledTest(function):
def Feature(feature_list):
def _AddFeatures(function):
for feature in feature_list:
- AnnotatedFunctions._AddFunction('Feature' + feature, function)
+ AnnotatedFunctions._AddFunction('Feature:%s' % feature, function)
return AnnotatedFunctions._AddFunction('Feature', function)
return _AddFeatures
diff --git a/build/android/pylib/instrumentation/test_options.py b/build/android/pylib/instrumentation/test_options.py
index 0c9ac005ba..4077ba12c7 100644
--- a/build/android/pylib/instrumentation/test_options.py
+++ b/build/android/pylib/instrumentation/test_options.py
@@ -7,7 +7,6 @@
import collections
InstrumentationOptions = collections.namedtuple('InstrumentationOptions', [
- 'build_type',
'tool',
'cleanup_test_files',
'push_deps',
@@ -18,6 +17,7 @@ InstrumentationOptions = collections.namedtuple('InstrumentationOptions', [
'save_perf_json',
'screenshot_failures',
'wait_for_debugger',
+ 'coverage_dir',
'test_apk',
'test_apk_path',
'test_apk_jar_path'])
diff --git a/build/android/pylib/instrumentation/test_runner.py b/build/android/pylib/instrumentation/test_runner.py
index 5e2b67ece8..73440f53c5 100644
--- a/build/android/pylib/instrumentation/test_runner.py
+++ b/build/android/pylib/instrumentation/test_runner.py
@@ -44,6 +44,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
"""Responsible for running a series of tests connected to a single device."""
_DEVICE_DATA_DIR = 'chrome/test/data'
+ _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
_HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
_DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
'/chrome-profile*')
@@ -62,7 +63,6 @@ class TestRunner(base_test_runner.BaseTestRunner):
Can be optionally requested by a test case.
"""
super(TestRunner, self).__init__(device, test_options.tool,
- test_options.build_type,
test_options.push_deps,
test_options.cleanup_test_files)
self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
@@ -70,6 +70,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
self.options = test_options
self.test_pkg = test_pkg
self.ports_to_forward = ports_to_forward
+ self.coverage_dir = test_options.coverage_dir
#override
def InstallTestPackage(self):
@@ -99,11 +100,11 @@ class TestRunner(base_test_runner.BaseTestRunner):
dst_src = dest_host_pair.split(':',1)
dst_layer = dst_src[0]
host_src = dst_src[1]
- host_test_files_path = constants.DIR_SOURCE_ROOT + '/' + host_src
+ host_test_files_path = '%s/%s' % (constants.DIR_SOURCE_ROOT, host_src)
if os.path.exists(host_test_files_path):
- self.adb.PushIfNeeded(host_test_files_path,
- self.adb.GetExternalStorage() + '/' +
- TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
+ self.adb.PushIfNeeded(host_test_files_path, '%s/%s/%s' % (
+ self.adb.GetExternalStorage(), TestRunner._DEVICE_DATA_DIR,
+ dst_layer))
self.tool.CopyFiles()
TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True
@@ -111,11 +112,15 @@ class TestRunner(base_test_runner.BaseTestRunner):
ret = {}
if self.options.wait_for_debugger:
ret['debug'] = 'true'
+ if self.coverage_dir:
+ ret['coverage'] = 'true'
+ ret['coverageFile'] = self.coverage_device_file
+
return ret
def _TakeScreenshot(self, test):
"""Takes a screenshot from the device."""
- screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png')
+ screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
logging.info('Taking screenshot named %s', screenshot_name)
self.adb.TakeScreenshot(screenshot_name)
@@ -157,6 +162,14 @@ class TestRunner(base_test_runner.BaseTestRunner):
# Make sure the forwarder is still running.
self._RestartHttpServerForwarderIfNecessary()
+ if self.coverage_dir:
+ coverage_basename = '%s.ec' % test
+ self.coverage_device_file = '%s/%s/%s' % (self.adb.GetExternalStorage(),
+ TestRunner._DEVICE_COVERAGE_DIR,
+ coverage_basename)
+ self.coverage_host_file = os.path.join(
+ self.coverage_dir, coverage_basename)
+
def _IsPerfTest(self, test):
"""Determines whether a test is a performance test.
@@ -199,6 +212,10 @@ class TestRunner(base_test_runner.BaseTestRunner):
self.TearDownPerfMonitoring(test)
+ if self.coverage_dir:
+ self.adb.Adb().Pull(self.coverage_device_file, self.coverage_host_file)
+ self.adb.RunShellCommand('rm -f %s' % self.coverage_device_file)
+
def TearDownPerfMonitoring(self, test):
"""Cleans up performance monitoring if the specified test required it.
diff --git a/build/android/pylib/monkey/test_options.py b/build/android/pylib/monkey/test_options.py
index 6b095f3ec6..31a91bb979 100644
--- a/build/android/pylib/monkey/test_options.py
+++ b/build/android/pylib/monkey/test_options.py
@@ -7,7 +7,6 @@
import collections
MonkeyOptions = collections.namedtuple('MonkeyOptions', [
- 'build_type',
'verbose_count',
'package_name',
'activity_name',
diff --git a/build/android/pylib/monkey/test_runner.py b/build/android/pylib/monkey/test_runner.py
index 99bc2e681b..6db9ea9dfc 100644
--- a/build/android/pylib/monkey/test_runner.py
+++ b/build/android/pylib/monkey/test_runner.py
@@ -14,7 +14,7 @@ class TestRunner(base_test_runner.BaseTestRunner):
"""A TestRunner instance runs a monkey test on a single device."""
def __init__(self, test_options, device, shard_index):
- super(TestRunner, self).__init__(device, None, test_options.build_type)
+ super(TestRunner, self).__init__(device, None)
self.options = test_options
def _LaunchMonkeyTest(self):
diff --git a/build/android/pylib/perf/__init__.py b/build/android/pylib/perf/__init__.py
new file mode 100644
index 0000000000..9228df89b0
--- /dev/null
+++ b/build/android/pylib/perf/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/build/android/pylib/perf/setup.py b/build/android/pylib/perf/setup.py
new file mode 100644
index 0000000000..5d0272d2f7
--- /dev/null
+++ b/build/android/pylib/perf/setup.py
@@ -0,0 +1,74 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generates test runner factory and tests for performance tests."""
+
+import json
+import logging
+import os
+import psutil
+import signal
+import time
+
+from pylib import android_commands
+from pylib import cmd_helper
+from pylib import forwarder
+from pylib import ports
+
+import test_runner
+
+
+def _KillPendingServers():
+ for retry in range(5):
+ for server in ['lighttpd', 'web-page-replay']:
+ pids = [p.pid for p in psutil.process_iter() if server in p.name]
+ for pid in pids:
+ try:
+ logging.warning('Killing %s %s', server, pid)
+ os.kill(pid, signal.SIGQUIT)
+ except Exception as e:
+ logging.warning('Failed killing %s %s %s', server, pid, e)
+ # Restart the adb server with taskset to set a single CPU affinity.
+ cmd_helper.RunCmd(['adb', 'kill-server'])
+ cmd_helper.RunCmd(['taskset', '-c', '0', 'adb', 'start-server'])
+ cmd_helper.RunCmd(['taskset', '-c', '0', 'adb', 'root'])
+ i = 1
+ while not android_commands.GetAttachedDevices():
+ time.sleep(i)
+ i *= 2
+ if i > 10:
+ break
+ # Reset the test port allocation. It's important to do it before starting
+ # to dispatch any step.
+ if not ports.ResetTestServerPortAllocation():
+ raise Exception('Failed to reset test server port.')
+
+ forwarder.Forwarder.UseMultiprocessing()
+
+
+def Setup(test_options):
+ """Create and return the test runner factory and tests.
+
+ Args:
+ test_options: A PerformanceOptions object.
+
+ Returns:
+ A tuple of (TestRunnerFactory, tests).
+ """
+ # Before running the tests, kill any leftover server.
+ _KillPendingServers()
+
+ with file(test_options.steps, 'r') as f:
+ tests = json.load(f)
+
+ flaky_steps = []
+ if test_options.flaky_steps:
+ with file(test_options.flaky_steps, 'r') as f:
+ flaky_steps = json.load(f)
+
+ def TestRunnerFactory(device, shard_index):
+ return test_runner.TestRunner(
+ test_options, device, tests, flaky_steps)
+
+ return (TestRunnerFactory, sorted(tests.keys()))
diff --git a/build/android/pylib/perf/test_options.py b/build/android/pylib/perf/test_options.py
new file mode 100644
index 0000000000..6664ba2661
--- /dev/null
+++ b/build/android/pylib/perf/test_options.py
@@ -0,0 +1,13 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines the PerfOptions named tuple."""
+
+import collections
+
+PerfOptions = collections.namedtuple('PerfOptions', [
+ 'steps',
+ 'flaky_steps',
+ 'print_step',
+])
diff --git a/build/android/pylib/perf/test_runner.py b/build/android/pylib/perf/test_runner.py
new file mode 100644
index 0000000000..bf09dfe00e
--- /dev/null
+++ b/build/android/pylib/perf/test_runner.py
@@ -0,0 +1,155 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs a perf test on a single device.
+
+Our buildbot infrastructure requires each slave to run steps serially.
+This is sub-optimal for android, where these steps can run independently on
+multiple connected devices.
+
+The buildbots will run this script multiple times per cycle:
+- First: all steps listed in --steps in will be executed in parallel using all
+connected devices. Step results will be pickled to disk. Each step has a unique
+name. The result code will be ignored if the step name is listed in
+--flaky-steps.
+The buildbot will treat this step as a regular step, and will not process any
+graph data.
+
+- Then, with -print-step STEP_NAME: at this stage, we'll simply print the file
+with the step results previously saved. The buildbot will then process the graph
+data accordingly.
+
+The JSON steps file contains a dictionary in the format:
+{
+ "step_name_foo": "script_to_execute foo",
+ "step_name_bar": "script_to_execute bar"
+}
+
+The JSON flaky steps file contains a list with step names which results should
+be ignored:
+[
+ "step_name_foo",
+ "step_name_bar"
+]
+
+Note that script_to_execute necessarily have to take at least the following
+options:
+ --device: the serial number to be passed to all adb commands.
+ --keep_test_server_ports: indicates it's being run as a shard, and shouldn't
+ reset test server port allocation.
+"""
+
+import datetime
+import logging
+import pexpect
+import pickle
+import os
+import sys
+import time
+
+from pylib import constants
+from pylib.base import base_test_result
+from pylib.base import base_test_runner
+
+
+_OUTPUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out', 'step_results')
+
+
+def PrintTestOutput(test_name):
+ """Helper method to print the output of previously executed test_name.
+
+ Args:
+ test_name: name of the test that has been previously executed.
+
+ Returns:
+ exit code generated by the test step.
+ """
+ file_name = os.path.join(_OUTPUT_DIR, test_name)
+ if not os.path.exists(file_name):
+ logging.error('File not found %s', file_name)
+ return 1
+
+ with file(file_name, 'r') as f:
+ persisted_result = pickle.loads(f.read())
+ print persisted_result['output']
+
+ return persisted_result['exit_code']
+
+
+class TestRunner(base_test_runner.BaseTestRunner):
+ def __init__(self, test_options, device, tests, flaky_tests):
+ """A TestRunner instance runs a perf test on a single device.
+
+ Args:
+ test_options: A PerfOptions object.
+ device: Device to run the tests.
+ tests: a dict mapping test_name to command.
+ flaky_tests: a list of flaky test_name.
+ """
+ super(TestRunner, self).__init__(device, None, 'Release')
+ self._options = test_options
+ self._tests = tests
+ self._flaky_tests = flaky_tests
+
+ @staticmethod
+ def _SaveResult(result):
+ with file(os.path.join(_OUTPUT_DIR, result['name']), 'w') as f:
+ f.write(pickle.dumps(result))
+
+ def _LaunchPerfTest(self, test_name):
+ """Runs a perf test.
+
+ Args:
+ test_name: the name of the test to be executed.
+
+ Returns:
+ A tuple containing (Output, base_test_result.ResultType)
+ """
+ cmd = ('%s --device %s --keep_test_server_ports' %
+ (self._tests[test_name], self.device))
+ logging.info('%s : %s', test_name, cmd)
+ start_time = datetime.datetime.now()
+ output, exit_code = pexpect.run(
+ cmd, cwd=os.path.abspath(constants.DIR_SOURCE_ROOT),
+ withexitstatus=True, logfile=sys.stdout, timeout=1800,
+ env=os.environ)
+ end_time = datetime.datetime.now()
+ logging.info('%s : exit_code=%d in %d secs at %s',
+ test_name, exit_code, (end_time - start_time).seconds,
+ self.device)
+ result_type = base_test_result.ResultType.FAIL
+ if exit_code == 0:
+ result_type = base_test_result.ResultType.PASS
+ if test_name in self._flaky_tests:
+ exit_code = 0
+ result_type = base_test_result.ResultType.PASS
+
+ persisted_result = {
+ 'name': test_name,
+ 'output': output,
+ 'exit_code': exit_code,
+ 'result_type': result_type,
+ 'total_time': (end_time - start_time).seconds,
+ 'device': self.device,
+ }
+ self._SaveResult(persisted_result)
+
+ return (output, result_type)
+
+ def RunTest(self, test_name):
+ """Run a perf test on the device.
+
+ Args:
+ test_name: String to use for logging the test result.
+
+ Returns:
+ A tuple of (TestRunResults, retry).
+ """
+ output, result_type = self._LaunchPerfTest(test_name)
+ results = base_test_result.TestRunResults()
+ results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
+ retry = None
+ if not results.DidRunPass():
+ retry = test_name
+ return results, retry
diff --git a/build/android/pylib/uiautomator/test_options.py b/build/android/pylib/uiautomator/test_options.py
index 2ce5eb0080..99098a18ca 100644
--- a/build/android/pylib/uiautomator/test_options.py
+++ b/build/android/pylib/uiautomator/test_options.py
@@ -7,7 +7,6 @@
import collections
UIAutomatorOptions = collections.namedtuple('UIAutomatorOptions', [
- 'build_type',
'tool',
'cleanup_test_files',
'push_deps',
diff --git a/build/android/pylib/uiautomator/test_runner.py b/build/android/pylib/uiautomator/test_runner.py
index 58cdd45dd0..fcd2cc61f9 100644
--- a/build/android/pylib/uiautomator/test_runner.py
+++ b/build/android/pylib/uiautomator/test_runner.py
@@ -25,7 +25,6 @@ class TestRunner(instr_test_runner.TestRunner):
"""
# Create an InstrumentationOptions object to pass to the super class
instrumentation_options = instr_test_options.InstrumentationOptions(
- test_options.build_type,
test_options.tool,
test_options.cleanup_test_files,
test_options.push_deps,
@@ -36,6 +35,7 @@ class TestRunner(instr_test_runner.TestRunner):
test_options.save_perf_json,
test_options.screenshot_failures,
wait_for_debugger=False,
+ coverage_dir=None,
test_apk=None,
test_apk_path=None,
test_apk_jar_path=None)
diff --git a/build/android/pylib/utils/command_option_parser.py b/build/android/pylib/utils/command_option_parser.py
new file mode 100644
index 0000000000..636d7e1888
--- /dev/null
+++ b/build/android/pylib/utils/command_option_parser.py
@@ -0,0 +1,76 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""An option parser which handles the first arg as a command.
+
+Add other nice functionality such as printing a list of commands
+and an example in usage.
+"""
+
+import optparse
+import sys
+
+
+class CommandOptionParser(optparse.OptionParser):
+ """Wrapper class for OptionParser to help with listing commands."""
+
+ def __init__(self, *args, **kwargs):
+ """Creates a CommandOptionParser.
+
+ Args:
+ commands_dict: A dictionary mapping command strings to an object defining
+ - add_options_func: Adds options to the option parser
+ - run_command_func: Runs the command itself.
+ example: An example command.
+ everything else: Passed to optparse.OptionParser contructor.
+ """
+ self.commands_dict = kwargs.pop('commands_dict', [])
+ self.example = kwargs.pop('example', '')
+ if not 'usage' in kwargs:
+ kwargs['usage'] = 'Usage: %prog <command> [options]'
+ optparse.OptionParser.__init__(self, *args, **kwargs)
+
+ #override
+ def get_usage(self):
+ normal_usage = optparse.OptionParser.get_usage(self)
+ command_list = self.get_command_list()
+ example = self.get_example()
+ return self.expand_prog_name(normal_usage + example + command_list)
+
+ #override
+ def get_command_list(self):
+ if self.commands_dict.keys():
+ return '\nCommands:\n %s\n' % '\n '.join(
+ sorted(self.commands_dict.keys()))
+ return ''
+
+ def get_example(self):
+ if self.example:
+ return '\nExample:\n %s\n' % self.example
+ return ''
+
+
+def ParseAndExecute(option_parser, argv=None):
+ """Parses options/args from argv and runs the specified command.
+
+ Args:
+ option_parser: A CommandOptionParser object.
+ argv: Command line arguments. If None, automatically draw from sys.argv.
+
+ Returns:
+ An exit code.
+ """
+ if not argv:
+ argv = sys.argv
+
+ if len(argv) < 2 or argv[1] not in option_parser.commands_dict:
+ # Parse args first, if this is '--help', optparse will print help and exit
+ option_parser.parse_args(argv)
+ option_parser.error('Invalid command.')
+
+ command = argv[1]
+ option_parser.commands_dict[command].add_options_func(option_parser)
+ options, args = option_parser.parse_args(argv)
+ return option_parser.commands_dict[command].run_command_func(
+ command, options, args, option_parser)
diff --git a/build/android/pylib/utils/report_results.py b/build/android/pylib/utils/report_results.py
index 673ab2e2d7..75ef3222d0 100644
--- a/build/android/pylib/utils/report_results.py
+++ b/build/android/pylib/utils/report_results.py
@@ -13,10 +13,10 @@ from pylib import constants
import flakiness_dashboard_results_uploader
-def _LogToFile(results, test_type, suite_name, build_type):
+def _LogToFile(results, test_type, suite_name):
"""Log results to local files which can be used for aggregation later."""
log_file_path = os.path.join(constants.DIR_SOURCE_ROOT, 'out',
- build_type, 'test_logs')
+ constants.GetBuildType(), 'test_logs')
if not os.path.exists(log_file_path):
os.mkdir(log_file_path)
full_file_name = os.path.join(
@@ -64,7 +64,7 @@ def _LogToFlakinessDashboard(results, test_type, test_package,
def LogFull(results, test_type, test_package, annotation=None,
- build_type='Debug', flakiness_server=None):
+ flakiness_server=None):
"""Log the tests results for the test suite.
The results will be logged three different ways:
@@ -80,7 +80,6 @@ def LogFull(results, test_type, test_package, annotation=None,
'ContentShellTest' for instrumentation tests)
annotation: If instrumenation test type, this is a list of annotations
(e.g. ['Smoke', 'SmallTest']).
- build_type: Release/Debug
flakiness_server: If provider, upload the results to flakiness dashboard
with this URL.
"""
@@ -104,7 +103,7 @@ def LogFull(results, test_type, test_package, annotation=None,
suite_name = annotation[0]
else:
suite_name = test_package
- _LogToFile(results, test_type, suite_name, build_type)
+ _LogToFile(results, test_type, suite_name)
if flakiness_server:
_LogToFlakinessDashboard(results, test_type, test_package,
diff --git a/build/android/run_browser_tests.py b/build/android/run_browser_tests.py
deleted file mode 100755
index c536a2991e..0000000000
--- a/build/android/run_browser_tests.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Runs content_browsertests."""
-
-import logging
-import os
-import sys
-
-from pylib import cmd_helper
-
-
-if __name__ == '__main__':
- args = [os.path.join(os.path.dirname(__file__), 'test_runner.py'),
- 'gtest', '-s', 'content_browsertests'] + sys.argv[1:]
- logging.warning('*' * 80)
- logging.warning('This script is deprecated and will be removed soon.')
- logging.warning('Use the following instead: %s', ' '.join(args))
- logging.warning('*' * 80)
- sys.exit(1)
diff --git a/build/android/run_instrumentation_tests.py b/build/android/run_instrumentation_tests.py
deleted file mode 100755
index 7a9fa1ec81..0000000000
--- a/build/android/run_instrumentation_tests.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Runs both the Python and Java instrumentation tests."""
-
-import logging
-import os
-import sys
-
-from pylib import cmd_helper
-
-
-if __name__ == '__main__':
- args = [os.path.join(os.path.dirname(__file__), 'test_runner.py'),
- 'instrumentation'] + sys.argv[1:]
- logging.warning('*' * 80)
- logging.warning('This script is deprecated and will be removed soon.')
- logging.warning('Use the following instead: %s', ' '.join(args))
- logging.warning('*' * 80)
- sys.exit(1)
diff --git a/build/android/run_tests.py b/build/android/run_tests.py
deleted file mode 100755
index 59118cf3b2..0000000000
--- a/build/android/run_tests.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Runs all the native unit tests."""
-
-import logging
-import os
-import sys
-
-from pylib import cmd_helper
-
-
-if __name__ == '__main__':
- args = [os.path.join(os.path.dirname(__file__), 'test_runner.py'),
- 'gtest'] + sys.argv[1:]
- logging.warning('*' * 80)
- logging.warning('This script is deprecated and will be removed soon.')
- logging.warning('Use the following instead: %s', ' '.join(args))
- logging.warning('*' * 80)
- sys.exit(1)
diff --git a/build/android/run_uiautomator_tests.py b/build/android/run_uiautomator_tests.py
deleted file mode 100755
index 4a94a834e6..0000000000
--- a/build/android/run_uiautomator_tests.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Runs both the Python and Java UIAutomator tests."""
-
-import logging
-import os
-import sys
-
-from pylib import cmd_helper
-
-
-if __name__ == '__main__':
- args = ['python',
- os.path.join(os.path.dirname(__file__), 'test_runner.py'),
- 'uiautomator'] + sys.argv[1:]
- logging.warning('*' * 80)
- logging.warning('This script is deprecated and will be removed soon.')
- logging.warning('Use the following instead: %s', ' '.join(args))
- logging.warning('*' * 80)
- sys.exit(cmd_helper.RunCmd(args))
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index 705eec41f3..2c87df901f 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -11,11 +11,13 @@ TODO(gkanwar):
"""
import collections
+import logging
import optparse
import os
import shutil
import sys
+from pylib import android_commands
from pylib import constants
from pylib import ports
from pylib.base import base_test_result
@@ -28,8 +30,12 @@ from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
+from pylib.perf import setup as perf_setup
+from pylib.perf import test_options as perf_test_options
+from pylib.perf import test_runner as perf_test_runner
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
+from pylib.utils import command_option_parser
from pylib.utils import report_results
from pylib.utils import run_tests_helper
@@ -91,6 +97,7 @@ def AddCommonOptions(option_parser):
def ProcessCommonOptions(options):
"""Processes and handles all common options."""
run_tests_helper.SetLogLevel(options.verbose_count)
+ constants.SetBuildType(options.build_type)
def AddGTestOptions(option_parser):
@@ -214,7 +221,7 @@ def AddInstrumentationTestOptions(option_parser):
option_parser.add_option('-p', '--python_only', action='store_true',
default=False,
help='Run only the host-driven tests.')
- option_parser.add_option('--python_test_root',
+ option_parser.add_option('--host-driven-root',
help='Root of the host-driven tests.')
option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true',
@@ -224,6 +231,9 @@ def AddInstrumentationTestOptions(option_parser):
help=('The name of the apk containing the tests '
'(without the .apk extension; e.g. "ContentShellTest"). '
'Alternatively, this can be a full path to the apk.'))
+ option_parser.add_option('--coverage-dir',
+ help=('Directory in which to place all generated '
+ 'EMMA coverage files.'))
def ProcessInstrumentationOptions(options, error_func):
@@ -250,7 +260,7 @@ def ProcessInstrumentationOptions(options, error_func):
elif options.python_only:
options.run_java_tests = False
- if not options.python_test_root:
+ if not options.host_driven_root:
options.run_python_tests = False
if not options.test_apk:
@@ -271,7 +281,6 @@ def ProcessInstrumentationOptions(options, error_func):
'%s.jar' % options.test_apk)
return instrumentation_test_options.InstrumentationOptions(
- options.build_type,
options.tool,
options.cleanup_test_files,
options.push_deps,
@@ -282,6 +291,7 @@ def ProcessInstrumentationOptions(options, error_func):
options.save_perf_json,
options.screenshot_failures,
options.wait_for_debugger,
+ options.coverage_dir,
options.test_apk,
options.test_apk_path,
options.test_apk_jar_path)
@@ -340,7 +350,6 @@ def ProcessUIAutomatorOptions(options, error_func):
'_java.jar')
return uiautomator_test_options.UIAutomatorOptions(
- options.build_type,
options.tool,
options.cleanup_test_files,
options.push_deps,
@@ -407,7 +416,6 @@ def ProcessMonkeyTestOptions(options, error_func):
category = options.category.split(',')
return monkey_test_options.MonkeyOptions(
- options.build_type,
options.verbose_count,
options.package_name,
options.activity_name,
@@ -418,7 +426,42 @@ def ProcessMonkeyTestOptions(options, error_func):
options.extra_args)
-def _RunGTests(options, error_func):
+def AddPerfTestOptions(option_parser):
+ """Adds perf test options to |option_parser|."""
+
+ option_parser.usage = '%prog perf [options]'
+ option_parser.command_list = []
+ option_parser.example = ('%prog perf --steps perf_steps.json')
+
+ option_parser.add_option('--steps', help='JSON file containing the list '
+ 'of perf steps to run.')
+ option_parser.add_option('--flaky-steps',
+ help='A JSON file containing steps that are flaky '
+ 'and will have its exit code ignored.')
+ option_parser.add_option('--print-step', help='The name of a previously '
+ 'executed perf step to print.')
+
+ AddCommonOptions(option_parser)
+
+
+def ProcessPerfTestOptions(options, error_func):
+ """Processes all perf test options.
+
+ Args:
+ options: optparse.Options object.
+ error_func: Function to call with the error message in case of an error.
+
+ Returns:
+ A PerfOptions named tuple which contains all options relevant to
+ perf tests.
+ """
+ if not options.steps and not options.print_step:
+ error_func('Please specify --steps or --print-step')
+ return perf_test_options.PerfOptions(
+ options.steps, options.flaky_steps, options.print_step)
+
+
+def _RunGTests(options, error_func, devices):
"""Subcommand of RunTestsCommands which runs gtests."""
ProcessGTestOptions(options)
@@ -427,7 +470,6 @@ def _RunGTests(options, error_func):
# TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
# the gtest command.
gtest_options = gtest_test_options.GTestOptions(
- options.build_type,
options.tool,
options.cleanup_test_files,
options.push_deps,
@@ -435,13 +477,10 @@ def _RunGTests(options, error_func):
options.test_arguments,
options.timeout,
suite_name)
- runner_factory, tests = gtest_setup.Setup(gtest_options)
+ runner_factory, tests = gtest_setup.Setup(gtest_options, devices)
results, test_exit_code = test_dispatcher.RunTests(
- tests, runner_factory, False, options.test_device,
- shard=True,
- build_type=options.build_type,
- test_timeout=None,
+ tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
@@ -451,7 +490,6 @@ def _RunGTests(options, error_func):
results=results,
test_type='Unit test',
test_package=suite_name,
- build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
if os.path.isdir(constants.ISOLATE_DEPS_DIR):
@@ -460,10 +498,14 @@ def _RunGTests(options, error_func):
return exit_code
-def _RunInstrumentationTests(options, error_func):
+def _RunInstrumentationTests(options, error_func, devices):
"""Subcommand of RunTestsCommands which runs instrumentation tests."""
instrumentation_options = ProcessInstrumentationOptions(options, error_func)
+ if len(devices) > 1 and options.wait_for_debugger:
+ logging.warning('Debugger can not be sharded, using first available device')
+ devices = devices[:1]
+
results = base_test_result.TestRunResults()
exit_code = 0
@@ -471,27 +513,19 @@ def _RunInstrumentationTests(options, error_func):
runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)
test_results, exit_code = test_dispatcher.RunTests(
- tests, runner_factory, options.wait_for_debugger,
- options.test_device,
- shard=True,
- build_type=options.build_type,
- test_timeout=None,
+ tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
results.AddTestRunResults(test_results)
if options.run_python_tests:
runner_factory, tests = host_driven_setup.InstrumentationSetup(
- options.python_test_root, options.official_build,
+ options.host_driven_root, options.official_build,
instrumentation_options)
if tests:
test_results, test_exit_code = test_dispatcher.RunTests(
- tests, runner_factory, False,
- options.test_device,
- shard=True,
- build_type=options.build_type,
- test_timeout=None,
+ tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
results.AddTestRunResults(test_results)
@@ -505,23 +539,19 @@ def _RunInstrumentationTests(options, error_func):
test_type='Instrumentation',
test_package=os.path.basename(options.test_apk),
annotation=options.annotations,
- build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
return exit_code
-def _RunUIAutomatorTests(options, error_func):
+def _RunUIAutomatorTests(options, error_func, devices):
"""Subcommand of RunTestsCommands which runs uiautomator tests."""
uiautomator_options = ProcessUIAutomatorOptions(options, error_func)
runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)
results, exit_code = test_dispatcher.RunTests(
- tests, runner_factory, False, options.test_device,
- shard=True,
- build_type=options.build_type,
- test_timeout=None,
+ tests, runner_factory, devices, shard=True, test_timeout=None,
num_retries=options.num_retries)
report_results.LogFull(
@@ -529,30 +559,71 @@ def _RunUIAutomatorTests(options, error_func):
test_type='UIAutomator',
test_package=os.path.basename(options.test_jar),
annotation=options.annotations,
- build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
return exit_code
-def _RunMonkeyTests(options, error_func):
+def _RunMonkeyTests(options, error_func, devices):
"""Subcommand of RunTestsCommands which runs monkey tests."""
monkey_options = ProcessMonkeyTestOptions(options, error_func)
runner_factory, tests = monkey_setup.Setup(monkey_options)
results, exit_code = test_dispatcher.RunTests(
- tests, runner_factory, False, None, shard=False, test_timeout=None)
+ tests, runner_factory, devices, shard=False, test_timeout=None)
report_results.LogFull(
results=results,
test_type='Monkey',
- test_package='Monkey',
- build_type=options.build_type)
+ test_package='Monkey')
return exit_code
+def _RunPerfTests(options, error_func, devices):
+ """Subcommand of RunTestsCommands which runs perf tests."""
+ perf_options = ProcessPerfTestOptions(options, error_func)
+ # Just print the results from a single previously executed step.
+ if perf_options.print_step:
+ return perf_test_runner.PrintTestOutput(perf_options.print_step)
+
+ runner_factory, tests = perf_setup.Setup(perf_options)
+
+ results, _ = test_dispatcher.RunTests(
+ tests, runner_factory, devices, shard=True, test_timeout=None)
+
+ report_results.LogFull(
+ results=results,
+ test_type='Perf',
+ test_package='Perf')
+ # Always return 0 on the sharding stage. Individual tests exit_code
+ # will be returned on the print_step stage.
+ return 0
+
+
+def _GetAttachedDevices(test_device=None):
+ """Get all attached devices.
+
+ Args:
+ test_device: Name of a specific device to use.
+
+ Returns:
+ A list of attached devices.
+ """
+ attached_devices = []
+
+ attached_devices = android_commands.GetAttachedDevices()
+ if test_device:
+ assert test_device in attached_devices, (
+ 'Did not find device %s among attached device. Attached devices: %s'
+ % (test_device, ', '.join(attached_devices)))
+ attached_devices = [test_device]
+
+ assert attached_devices, 'No devices attached.'
+
+ return sorted(attached_devices)
+
def RunTestsCommand(command, options, args, option_parser):
"""Checks test type and dispatches to the appropriate function.
@@ -579,14 +650,18 @@ def RunTestsCommand(command, options, args, option_parser):
ProcessCommonOptions(options)
+ devices = _GetAttachedDevices(options.test_device)
+
if command == 'gtest':
- return _RunGTests(options, option_parser.error)
+ return _RunGTests(options, option_parser.error, devices)
elif command == 'instrumentation':
- return _RunInstrumentationTests(options, option_parser.error)
+ return _RunInstrumentationTests(options, option_parser.error, devices)
elif command == 'uiautomator':
- return _RunUIAutomatorTests(options, option_parser.error)
+ return _RunUIAutomatorTests(options, option_parser.error, devices)
elif command == 'monkey':
- return _RunMonkeyTests(options, option_parser.error)
+ return _RunMonkeyTests(options, option_parser.error, devices)
+ elif command == 'perf':
+ return _RunPerfTests(options, option_parser.error, devices)
else:
raise Exception('Unknown test type.')
@@ -645,49 +720,16 @@ VALID_COMMANDS = {
AddUIAutomatorTestOptions, RunTestsCommand),
'monkey': CommandFunctionTuple(
AddMonkeyTestOptions, RunTestsCommand),
+ 'perf': CommandFunctionTuple(
+ AddPerfTestOptions, RunTestsCommand),
'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
}
-class CommandOptionParser(optparse.OptionParser):
- """Wrapper class for OptionParser to help with listing commands."""
-
- def __init__(self, *args, **kwargs):
- self.command_list = kwargs.pop('command_list', [])
- self.example = kwargs.pop('example', '')
- optparse.OptionParser.__init__(self, *args, **kwargs)
-
- #override
- def get_usage(self):
- normal_usage = optparse.OptionParser.get_usage(self)
- command_list = self.get_command_list()
- example = self.get_example()
- return self.expand_prog_name(normal_usage + example + command_list)
-
- #override
- def get_command_list(self):
- if self.command_list:
- return '\nCommands:\n %s\n' % '\n '.join(sorted(self.command_list))
- return ''
-
- def get_example(self):
- if self.example:
- return '\nExample:\n %s\n' % self.example
- return ''
-
-
def main(argv):
- option_parser = CommandOptionParser(
- usage='Usage: %prog <command> [options]',
- command_list=VALID_COMMANDS.keys())
-
- if len(argv) < 2 or argv[1] not in VALID_COMMANDS:
- option_parser.error('Invalid command.')
- command = argv[1]
- VALID_COMMANDS[command].add_options_func(option_parser)
- options, args = option_parser.parse_args(argv)
- return VALID_COMMANDS[command].run_command_func(
- command, options, args, option_parser)
+ option_parser = command_option_parser.CommandOptionParser(
+ commands_dict=VALID_COMMANDS)
+ return command_option_parser.ParseAndExecute(option_parser)
if __name__ == '__main__':
diff --git a/build/android/run_update_verification.py b/build/android/update_verification.py
index 5f62f3e40b..0c349c31be 100755
--- a/build/android/run_update_verification.py
+++ b/build/android/update_verification.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
-# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/build/common.gypi b/build/common.gypi
index e95efab421..e70eaac88a 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -523,10 +523,8 @@
'enable_automation%': 0,
'enable_extensions%': 0,
'enable_google_now%': 0,
- 'enable_printing%': 0,
'enable_spellcheck%': 0,
'enable_themes%': 0,
- 'proprietary_codecs%': 1,
'remoting%': 0,
'arm_neon%': 0,
'arm_neon_optional%': 1,
@@ -534,6 +532,22 @@
'native_memory_pressure_signals%': 1,
}],
+ # Enable basic printing for Chrome for Android but disable printing
+ # completely for WebView.
+ ['OS=="android" and android_webview_build==0', {
+ 'enable_printing%': 2,
+ }],
+ ['OS=="android" and android_webview_build==1', {
+ 'enable_printing%': 0,
+ }],
+
+ # Android OS includes support for proprietary codecs regardless of
+ # building Chromium or Google Chrome. We also ship Google Chrome with
+ # proprietary codecs.
+ ['OS=="android" or branding=="Chrome"', {
+ 'proprietary_codecs%': 1,
+ }],
+
# Enable autofill dialog for Android, Mac and Views-enabled platforms.
['toolkit_views==1 or (OS=="android" and android_webview_build==0) or OS=="mac"', {
'enable_autofill_dialog%': 1
@@ -910,6 +924,10 @@
# Currently ignored on Windows.
'coverage%': 0,
+ # Set to 1 to enable java code coverage. Instruments classes during build
+ # to produce .ec files during runtime.
+ 'emma_coverage%': 0,
+
# Set to 1 to force Visual C++ to use legacy debug information format /Z7.
# This is useful for parallel compilation tools which can't support /Zi.
# Only used on Windows.
@@ -923,9 +941,6 @@
# to ~/.gyp/include.gypi, gclient runhooks --force, and do a release build.
'win_use_allocator_shim%': 1, # 1 = shim allocator via libcmt; 0 = msvcrt
- # Whether usage of OpenMAX is enabled.
- 'enable_openmax%': 0,
-
# Whether proprietary audio/video codecs are assumed to be included with
# this build (only meaningful if branding!=Chrome).
'proprietary_codecs%': 0,
@@ -1435,6 +1450,9 @@
}],
['component=="shared_library"', {
'win_use_allocator_shim%': 0,
+ },{
+ # Turn on multiple dll by default on Windows when in static_library.
+ 'chrome_multiple_dll%': 1,
}],
['component=="shared_library" and "<(GENERATOR)"=="ninja"', {
# Only enabled by default for ninja because it's buggy in VS.
@@ -1571,8 +1589,7 @@
}],
['OS == "ios"', {
'grit_defines': [
- # define for iOS specific resources.
- '-D', 'ios',
+ '-t', 'ios',
# iOS uses a whitelist to filter resources.
'-w', '<(DEPTH)/build/ios/grit_whitelist.txt'
],
@@ -1899,9 +1916,6 @@
'<(DEPTH)/base/allocator/allocator.gyp:type_profiler',
],
}],
- ['chrome_multiple_dll', {
- 'defines': ['CHROME_MULTIPLE_DLL'],
- }],
['OS=="linux" and clang==1 and host_arch=="ia32"', {
# TODO(dmikurube): Remove -Wno-sentinel when Clang/LLVM is fixed.
# See http://crbug.com/162818.
@@ -2989,6 +3003,7 @@
['chromeos==1 and disable_sse2==0', {
'cflags': [
'-msse2',
+ '-mfpmath=sse',
],
}],
# Use gold linker for Android ia32 target.
diff --git a/build/filename_rules.gypi b/build/filename_rules.gypi
index a8d429b7aa..cf8c3572e5 100644
--- a/build/filename_rules.gypi
+++ b/build/filename_rules.gypi
@@ -86,7 +86,7 @@
]
}],
['<(use_aura)==0 or <(use_x11)==0 or >(nacl_untrusted_build)==1', {
- 'sources/': [ ['exclude', '_aurax11\\.(h|cc)$'] ]
+ 'sources/': [ ['exclude', '_aurax11(_browsertest|_unittest)?\\.(h|cc)$'] ]
}],
['<(use_aura)==0 or OS!="win" or >(nacl_untrusted_build)==1', {
'sources/': [ ['exclude', '_aurawin\\.(h|cc)$'] ]
diff --git a/build/gdb-add-index b/build/gdb-add-index
index 4975532213..0d66d8dac8 100755
--- a/build/gdb-add-index
+++ b/build/gdb-add-index
@@ -4,8 +4,84 @@
# found in the LICENSE file.
#
# Saves the gdb index for a given binary and its shared library dependencies.
+#
+# This will run gdb index in parallel on a number of binaries using SIGUSR1
+# as the communication mechanism to simulate a semaphore. Because of the
+# nature of this technique, using "set -e" is very difficult. The SIGUSR1
+# terminates a "wait" with an error which we need to interpret.
+#
+# When modifying this code, most of the real logic is in the index_one_file
+# function. The rest is cleanup + semaphore plumbing.
+
+# Cleanup temp directory and ensure all child jobs are dead-dead.
+function on_exit {
+ trap "" EXIT USR1 # Avoid reentrancy.
+
+ local jobs=$(jobs -p)
+ if [ -n "$jobs" ]; then
+ echo -n "Killing outstanding index jobs..."
+ kill -KILL $(jobs -p)
+ wait
+ echo "done"
+ fi
+
+ if [ -d "$DIRECTORY" ]; then
+ echo -n "Removing temp directory $DIRECTORY..."
+ rm -rf $DIRECTORY
+ echo done
+ fi
+}
+
+# Add index to one binary.
+function index_one_file {
+ local file=$1
+ local basename=$(basename "$file")
+
+ local readelf_out=$(readelf -S "$file")
+ if [[ $readelf_out =~ "gdb_index" ]]; then
+ echo "Skipped $basename -- already contains index."
+ else
+ local start=$(date +"%s%N")
+ echo "Adding index to $basename..."
+
+ gdb -batch "$file" -ex "save gdb-index $DIRECTORY" -ex "quit"
+ local index_file="$DIRECTORY/$basename.gdb-index"
+ if [ -f "$index_file" ]; then
+ objcopy --add-section .gdb_index="$index_file" \
+ --set-section-flags .gdb_index=readonly "$file" "$file"
+ local finish=$(date +"%s%N")
+ local elapsed=$(((finish - start)/1000000))
+ echo " ...$basename indexed. [${elapsed}ms]"
+ else
+ echo " ...$basename unindexable."
+ fi
+ fi
+}
-set -e
+# Functions that when combined, concurrently index all files in FILES_TO_INDEX
+# array. The global FILES_TO_INDEX is declared in the main body of the script.
+function async_index {
+ # Start a background subshell to run the index command.
+ {
+ index_one_file $1
+ kill -SIGUSR1 $$ # $$ resolves to the parent script.
+ exit 129 # See comment above wait loop at bottom.
+ } &
+}
+
+CUR_FILE_NUM=0
+function index_next {
+ if (( CUR_FILE_NUM >= ${#FILES_TO_INDEX[@]} )); then
+ return
+ fi
+
+ async_index "${FILES_TO_INDEX[CUR_FILE_NUM]}"
+ ((CUR_FILE_NUM += 1)) || true
+}
+
+
+########
+### Main body of the script.
if [[ ! $# == 1 ]]; then
echo "Usage: $0 path-to-binary"
@@ -18,30 +94,38 @@ if [[ ! -f "$FILENAME" ]]; then
exit 1
fi
+# Ensure we cleanup on on exit.
+trap on_exit EXIT
+
# We're good to go! Create temp directory for index files.
DIRECTORY=$(mktemp -d)
echo "Made temp directory $DIRECTORY."
-# Always remove directory on exit.
-trap "{ echo -n Removing temp directory $DIRECTORY...;
- rm -rf $DIRECTORY; echo done; }" EXIT
-
-# Grab all the chromium shared library files.
-so_files=$(ldd "$FILENAME" 2>/dev/null \
+# Create array with the filename and all shared libraries that
+# have the same dirname. The dirname is a signal that these
+# shared libraries were part of the same build as the binary.
+declare -a FILES_TO_INDEX=($FILENAME
+ $(ldd "$FILENAME" 2>/dev/null \
| grep $(dirname "$FILENAME") \
| sed "s/.*[ \t]\(.*\) (.*/\1/")
+)
-# Add index to binary and the shared library dependencies.
-for file in "$FILENAME" $so_files; do
- basename=$(basename "$file")
- echo -n "Adding index to $basename..."
- readelf_out=$(readelf -S "$file")
- if [[ $readelf_out =~ "gdb_index" ]]; then
- echo "already contains index. Skipped."
- else
- gdb -batch "$file" -ex "save gdb-index $DIRECTORY" -ex "quit"
- objcopy --add-section .gdb_index="$DIRECTORY"/$basename.gdb-index \
- --set-section-flags .gdb_index=readonly "$file" "$file"
- echo "done."
- fi
+# Start concurrent indexing.
+trap index_next USR1
+
+# 4 is an arbitrary default. When changing, remember we are likely IO bound
+# so basing this off the number of cores is not sensible.
+INDEX_TASKS=${INDEX_TASKS:-4}
+for ((i=0;i<${INDEX_TASKS};i++)); do
+ index_next
+done
+
+# Do a wait loop. Bash waits that terminate due a trap have an exit
+# code > 128. We also ensure that our subshell's "normal" exit occurs with
+# an exit code > 128. This allows us to consider a > 128 exit code as
+# an indication that the loop should continue. Unfortunately, it also means
+# we cannot use set -e since technically the "wait" is failing.
+wait
+while (( $? > 128 )); do
+ wait
done
diff --git a/build/get_landmines.py b/build/get_landmines.py
new file mode 100755
index 0000000000..05c9de6962
--- /dev/null
+++ b/build/get_landmines.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This file emits the list of reasons why a particular build needs to be clobbered
+(or a list of 'landmines').
+"""
+
+import optparse
+import sys
+
+import landmine_utils
+
+
+builder = landmine_utils.builder
+distributor = landmine_utils.distributor
+gyp_defines = landmine_utils.gyp_defines
+gyp_msvs_version = landmine_utils.gyp_msvs_version
+platform = landmine_utils.platform
+
+
+def print_landmines(target):
+ """
+ ALL LANDMINES ARE EMITTED FROM HERE.
+ target can be one of {'Release', 'Debug', 'Debug_x64', 'Release_x64'}.
+ """
+ if (distributor() == 'goma' and platform() == 'win32' and
+ builder() == 'ninja'):
+ print 'Need to clobber winja goma due to backend cwd cache fix.'
+ if platform() == 'android':
+ print 'Clobber: Resources removed in r195014 require clobber.'
+ if platform() == 'win' and builder() == 'ninja':
+ print 'Compile on cc_unittests fails due to symbols removed in r185063.'
+ if platform() == 'linux' and builder() == 'ninja':
+ print 'Builders switching from make to ninja will clobber on this.'
+ if platform() == 'mac':
+ print 'Switching from bundle to unbundled dylib (issue 14743002).'
+ if (platform() == 'win' and builder() == 'ninja' and
+ gyp_msvs_version() == '2012' and
+ gyp_defines().get('target_arch') == 'x64' and
+ gyp_defines().get('dcheck_always_on') == '1'):
+ print "Switched win x64 trybots from VS2010 to VS2012."
+ print 'Need to clobber everything due to an IDL change in r154579 (blink)'
+
+
+def main():
+ parser = optparse.OptionParser()
+ parser.add_option('-t', '--target',
+ help='Target for which the landmines have to be emitted')
+
+ options, args = parser.parse_args()
+
+ if args:
+ parser.error('Unknown arguments %s' % args)
+
+ print_landmines(options.target)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/build/ios/grit_whitelist.txt b/build/ios/grit_whitelist.txt
index c72022c6d2..ed15d463a1 100644
--- a/build/ios/grit_whitelist.txt
+++ b/build/ios/grit_whitelist.txt
@@ -565,8 +565,6 @@ IDS_FLAGS_ENABLE_PASSWORD_GENERATION_DESCRIPTION
IDS_FLAGS_ENABLE_PASSWORD_GENERATION_NAME
IDS_FLAGS_ENABLE_PINCH_SCALE_DESCRIPTION
IDS_FLAGS_ENABLE_PINCH_SCALE_NAME
-IDS_FLAGS_ENABLE_PNACL_DESCRIPTION
-IDS_FLAGS_ENABLE_PNACL_NAME
IDS_FLAGS_ENABLE_REQUEST_TABLET_SITE_DESCRIPTION
IDS_FLAGS_ENABLE_REQUEST_TABLET_SITE_NAME
IDS_FLAGS_ENABLE_RICH_NOTIFICATIONS_DESCRIPTION
@@ -672,6 +670,8 @@ IDS_FLAGS_PERFORMANCE_MONITOR_GATHERING_DESCRIPTION
IDS_FLAGS_PERFORMANCE_MONITOR_GATHERING_NAME
IDS_FLAGS_PER_TILE_PAINTING_DESCRIPTION
IDS_FLAGS_PER_TILE_PAINTING_NAME
+IDS_FLAGS_PNACL_DESCRIPTION
+IDS_FLAGS_PNACL_NAME
IDS_FLAGS_PRESENT_WITH_GDI_ALL_SHOW
IDS_FLAGS_PRESENT_WITH_GDI_DESCRIPTION
IDS_FLAGS_PRESENT_WITH_GDI_FIRST_SHOW
diff --git a/build/java.gypi b/build/java.gypi
index 1635c71da4..95858ee298 100644
--- a/build/java.gypi
+++ b/build/java.gypi
@@ -55,8 +55,11 @@
'additional_src_dirs': [],
'javac_includes': [],
'jar_name': '<(_target_name).jar',
- 'jar_path': '<(PRODUCT_DIR)/lib.java/<(jar_name)',
+ 'jar_dir': '<(PRODUCT_DIR)/lib.java',
+ 'jar_path': '<(intermediate_dir)/<(jar_name)',
+ 'jar_final_path': '<(jar_dir)/<(jar_name)',
'jar_excluded_classes': [ '*/R.class', '*/R##*.class' ],
+ 'instr_stamp': '<(intermediate_dir)/instr.stamp',
'additional_input_paths': [],
'dex_path': '<(PRODUCT_DIR)/lib.java/<(_target_name).dex.jar',
'generated_src_dirs': ['>@(generated_R_dirs)'],
@@ -70,12 +73,34 @@
'intermediate_dir': '<(SHARED_INTERMEDIATE_DIR)/<(_target_name)',
'classes_dir': '<(intermediate_dir)/classes',
'compile_stamp': '<(intermediate_dir)/compile.stamp',
+ 'proguard_config%': '',
+ 'proguard_preprocess%': '0',
+ 'variables': {
+ 'variables': {
+ 'proguard_preprocess%': 0,
+ },
+ 'conditions': [
+ ['proguard_preprocess == 1', {
+ 'javac_jar_path': '<(intermediate_dir)/<(_target_name).pre.jar'
+ }, {
+ 'javac_jar_path': '<(jar_path)'
+ }],
+ ],
+ },
+ 'javac_jar_path': '<(javac_jar_path)',
+ 'conditions': [
+ ['chromium_code != 0 and emma_coverage != 0', {
+ 'emma_instrument': 1,
+ }, {
+ 'emma_instrument': 0,
+ }],
+ ],
},
# This all_dependent_settings is used for java targets only. This will add the
# jar path to the classpath of dependent java targets.
'all_dependent_settings': {
'variables': {
- 'input_jars_paths': ['<(jar_path)'],
+ 'input_jars_paths': ['<(jar_final_path)'],
'library_dexed_jars_paths': ['<(dex_path)'],
},
},
@@ -218,6 +243,35 @@
},
],
}],
+ ['proguard_preprocess == 1', {
+ 'actions': [
+ {
+ 'action_name': 'proguard_<(_target_name)',
+ 'message': 'Proguard preprocessing <(_target_name) jar',
+ 'inputs': [
+ '<(android_sdk_root)/tools/proguard/bin/proguard.sh',
+ '<(DEPTH)/build/android/gyp/util/build_utils.py',
+ '<(DEPTH)/build/android/gyp/proguard.py',
+ '<(javac_jar_path)',
+ '<(proguard_config)',
+ ],
+ 'outputs': [
+ '<(jar_path)',
+ ],
+ 'action': [
+ 'python', '<(DEPTH)/build/android/gyp/proguard.py',
+ '--proguard-path=<(android_sdk_root)/tools/proguard/bin/proguard.sh',
+ '--input-path=<(javac_jar_path)',
+ '--output-path=<(jar_path)',
+ '--proguard-config=<(proguard_config)',
+ '--classpath=<(android_sdk_jar) >(input_jars_paths)',
+
+ # TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
+ '--ignore=>!(echo \'>(_inputs)\' | md5sum)',
+ ]
+ },
+ ],
+ }],
],
'actions': [
{
@@ -263,12 +317,12 @@
'<(compile_stamp)',
],
'outputs': [
- '<(jar_path)',
+ '<(javac_jar_path)',
],
'action': [
'python', '<(DEPTH)/build/android/gyp/jar.py',
'--classes-dir=<(classes_dir)',
- '--jar-path=<(jar_path)',
+ '--jar-path=<(javac_jar_path)',
'--excluded-classes=<(jar_excluded_classes)',
# TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
@@ -276,21 +330,38 @@
]
},
{
+ 'action_name': 'instr_jar_<(_target_name)',
+ 'message': 'Instrumenting <(_target_name) jar',
+ 'variables': {
+ 'input_path': '<(jar_path)',
+ 'output_path': '<(jar_final_path)',
+ 'stamp_path': '<(instr_stamp)',
+ 'instr_type': 'jar',
+ },
+ 'outputs': [
+ '<(jar_final_path)',
+ ],
+ 'inputs': [
+ '<(jar_path)',
+ ],
+ 'includes': [ 'android/instr_action.gypi' ],
+ },
+ {
'action_name': 'jar_toc_<(_target_name)',
'message': 'Creating <(_target_name) jar.TOC',
'inputs': [
'<(DEPTH)/build/android/gyp/util/build_utils.py',
'<(DEPTH)/build/android/gyp/util/md5_check.py',
'<(DEPTH)/build/android/gyp/jar_toc.py',
- '<(jar_path)',
+ '<(jar_final_path)',
],
'outputs': [
- '<(jar_path).TOC',
+ '<(jar_final_path).TOC',
],
'action': [
'python', '<(DEPTH)/build/android/gyp/jar_toc.py',
- '--jar-path=<(jar_path)',
- '--toc-path=<(jar_path).TOC',
+ '--jar-path=<(jar_final_path)',
+ '--toc-path=<(jar_final_path).TOC',
# TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
'--ignore=>!(echo \'>(_inputs)\' | md5sum)',
@@ -299,7 +370,12 @@
{
'action_name': 'dex_<(_target_name)',
'variables': {
- 'dex_input_paths': [ '<(jar_path)' ],
+ 'conditions': [
+ ['emma_instrument != 0', {
+ 'dex_no_locals': 1,
+ }],
+ ],
+ 'dex_input_paths': [ '<(jar_final_path)' ],
'output_path': '<(dex_path)',
},
'includes': [ 'android/dex_action.gypi' ],
diff --git a/build/java_apk.gypi b/build/java_apk.gypi
index 2457d3a193..d8a6cacf3e 100644
--- a/build/java_apk.gypi
+++ b/build/java_apk.gypi
@@ -87,15 +87,18 @@
'native_libraries_template_data_file': '<(native_libraries_template_data_dir)/native_libraries_array.h',
'native_libraries_template_data_stamp': '<(intermediate_dir)/native_libraries_template_data.stamp',
'compile_stamp': '<(intermediate_dir)/compile.stamp',
+ 'instr_stamp': '<(intermediate_dir)/instr.stamp',
'jar_stamp': '<(intermediate_dir)/jar.stamp',
'obfuscate_stamp': '<(intermediate_dir)/obfuscate.stamp',
'strip_stamp': '<(intermediate_dir)/strip.stamp',
'classes_dir': '<(intermediate_dir)/classes',
+ 'classes_final_dir': '<(intermediate_dir)/classes_instr',
'javac_includes': [],
'jar_excluded_classes': [],
'jar_path': '<(PRODUCT_DIR)/lib.java/<(jar_name)',
'obfuscated_jar_path': '<(intermediate_dir)/obfuscated.jar',
'dex_path': '<(intermediate_dir)/classes.dex',
+ 'emma_device_jar': '<(android_sdk_root)/tools/lib/emma_device.jar',
'android_manifest_path%': '<(java_in_dir)/AndroidManifest.xml',
'push_stamp': '<(intermediate_dir)/push.stamp',
'link_stamp': '<(intermediate_dir)/link.stamp',
@@ -128,6 +131,7 @@
],
},
'native_lib_target%': '',
+ 'emma_instrument': '<(emma_coverage)',
'apk_package_native_libs_dir': '<(apk_package_native_libs_dir)',
'unsigned_standalone_apk_path': '<(unsigned_standalone_apk_path)',
},
@@ -371,6 +375,11 @@
},
],
}],
+ ['is_test_apk == 1', {
+ 'dependencies': [
+ '<(DEPTH)/tools/android/android_tools.gyp:android_tools',
+ ]
+ }],
],
'actions': [
{
@@ -461,20 +470,37 @@
],
},
{
+ 'action_name': 'instr_classes_<(_target_name)',
+ 'message': 'Instrumenting <(_target_name) classes',
+ 'variables': {
+ 'input_path': '<(classes_dir)',
+ 'output_path': '<(classes_final_dir)',
+ 'stamp_path': '<(instr_stamp)',
+ 'instr_type': 'classes',
+ },
+ 'outputs': [
+ '<(instr_stamp)',
+ ],
+ 'inputs': [
+ '<(compile_stamp)',
+ ],
+ 'includes': [ 'android/instr_action.gypi' ],
+ },
+ {
'action_name': 'jar_<(_target_name)',
'message': 'Creating <(_target_name) jar',
'inputs': [
+ '<(instr_stamp)',
'<(DEPTH)/build/android/gyp/util/build_utils.py',
'<(DEPTH)/build/android/gyp/util/md5_check.py',
'<(DEPTH)/build/android/gyp/jar.py',
- '<(compile_stamp)',
],
'outputs': [
'<(jar_stamp)',
],
'action': [
'python', '<(DEPTH)/build/android/gyp/jar.py',
- '--classes-dir=<(classes_dir)',
+ '--classes-dir=<(classes_final_dir)',
'--jar-path=<(jar_path)',
'--excluded-classes=<(jar_excluded_classes)',
'--stamp=<(jar_stamp)',
@@ -491,7 +517,7 @@
'<(DEPTH)/build/android/ant/create-test-jar.js',
'<(DEPTH)/build/android/gyp/util/build_utils.py',
'<(DEPTH)/build/android/gyp/ant.py',
- '<(compile_stamp)',
+ '<(instr_stamp)',
'>@(proguard_flags_paths)',
],
'outputs': [
@@ -537,10 +563,16 @@
'input_paths': [ '<(obfuscate_stamp)' ],
'proguard_enabled_input_path': '<(obfuscated_jar_path)',
}],
+ ['emma_instrument != 0', {
+ 'dex_no_locals': 1,
+ }],
+ ['emma_instrument != 0 and is_test_apk == 0', {
+ 'dex_input_paths': [ '<(emma_device_jar)' ],
+ }],
],
- 'input_paths': [ '<(compile_stamp)' ],
+ 'input_paths': [ '<(instr_stamp)' ],
'dex_input_paths': [ '>@(library_dexed_jars_paths)' ],
- 'dex_generated_input_dirs': [ '<(classes_dir)' ],
+ 'dex_generated_input_dirs': [ '<(classes_final_dir)' ],
'output_path': '<(dex_path)',
},
'includes': [ 'android/dex_action.gypi' ],
@@ -625,6 +657,8 @@
'-DOUT_DIR=<(intermediate_dir)',
'-DSOURCE_DIR=<(source_dir)',
'-DUNSIGNED_APK_PATH=<(unsigned_apk_path)',
+ '-DEMMA_INSTRUMENT=<(emma_instrument)',
+ '-DEMMA_DEVICE_JAR=<(emma_device_jar)',
'-Dbasedir=.',
'-buildfile',
diff --git a/build/java_prebuilt.gypi b/build/java_prebuilt.gypi
index 80003749fc..cec881dfd1 100644
--- a/build/java_prebuilt.gypi
+++ b/build/java_prebuilt.gypi
@@ -24,13 +24,62 @@
],
'variables': {
'dex_path': '<(PRODUCT_DIR)/lib.java/<(_target_name).dex.jar',
+ 'intermediate_dir': '<(SHARED_INTERMEDIATE_DIR)/<(_target_name)',
+ 'android_jar': '<(android_sdk)/android.jar',
+ 'input_jars_paths': [ '<(android_jar)' ],
+ 'proguard_config%': '',
+ 'proguard_preprocess%': '0',
+ 'variables': {
+ 'variables': {
+ 'proguard_preprocess%': 0,
+ },
+ 'conditions': [
+ ['proguard_preprocess == 1', {
+ 'dex_input_jar_path': '<(intermediate_dir)/<(_target_name).pre.jar'
+ }, {
+ 'dex_input_jar_path': '<(jar_path)'
+ }],
+ ],
+ },
+ 'dex_input_jar_path': '<(dex_input_jar_path)',
},
'all_dependent_settings': {
'variables': {
- 'input_jars_paths': ['<(jar_path)'],
+ 'input_jars_paths': ['<(dex_input_jar_path)'],
'library_dexed_jars_paths': ['<(dex_path)'],
},
},
+ 'conditions' : [
+ ['proguard_preprocess == 1', {
+ 'actions': [
+ {
+ 'action_name': 'proguard_<(_target_name)',
+ 'message': 'Proguard preprocessing <(_target_name) jar',
+ 'inputs': [
+ '<(android_sdk_root)/tools/proguard/bin/proguard.sh',
+ '<(DEPTH)/build/android/gyp/util/build_utils.py',
+ '<(DEPTH)/build/android/gyp/proguard.py',
+ '<(jar_path)',
+ '<(proguard_config)',
+ ],
+ 'outputs': [
+ '<(dex_input_jar_path)',
+ ],
+ 'action': [
+ 'python', '<(DEPTH)/build/android/gyp/proguard.py',
+ '--proguard-path=<(android_sdk_root)/tools/proguard/bin/proguard.sh',
+ '--input-path=<(jar_path)',
+ '--output-path=<(dex_input_jar_path)',
+ '--proguard-config=<(proguard_config)',
+ '--classpath=>(input_jars_paths)',
+
+ # TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
+ '--ignore=>!(echo \'>(_inputs)\' | md5sum)',
+ ]
+ },
+ ],
+ }],
+ ],
'actions': [
{
'action_name': 'dex_<(_target_name)',
@@ -38,7 +87,7 @@
'inputs': [
'<(DEPTH)/build/android/gyp/util/build_utils.py',
'<(DEPTH)/build/android/gyp/dex.py',
- '<(jar_path)',
+ '<(dex_input_jar_path)',
],
'outputs': [
'<(dex_path)',
@@ -51,7 +100,7 @@
# TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
'--ignore=>!(echo \'>(_inputs)\' | md5sum)',
- '<(jar_path)',
+ '<(dex_input_jar_path)',
]
},
diff --git a/build/landmine_utils.py b/build/landmine_utils.py
new file mode 100644
index 0000000000..021fc9b711
--- /dev/null
+++ b/build/landmine_utils.py
@@ -0,0 +1,114 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import functools
+import logging
+import os
+import shlex
+import sys
+
+
+def memoize(default=None):
+ """This decorator caches the return value of a parameterless pure function"""
+ def memoizer(func):
+ val = []
+ @functools.wraps(func)
+ def inner():
+ if not val:
+ ret = func()
+ val.append(ret if ret is not None else default)
+ if logging.getLogger().isEnabledFor(logging.INFO):
+ print '%s -> %r' % (func.__name__, val[0])
+ return val[0]
+ return inner
+ return memoizer
+
+
+@memoize()
+def IsWindows():
+ return sys.platform in ['win32', 'cygwin']
+
+
+@memoize()
+def IsLinux():
+ return sys.platform.startswith('linux')
+
+
+@memoize()
+def IsMac():
+ return sys.platform == 'darwin'
+
+
+@memoize()
+def gyp_defines():
+ """Parses and returns GYP_DEFINES env var as a dictionary."""
+ return dict(arg.split('=', 1)
+ for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
+
+@memoize()
+def gyp_msvs_version():
+ return os.environ.get('GYP_MSVS_VERSION', '')
+
+@memoize()
+def distributor():
+ """
+ Returns a string which is the distributed build engine in use (if any).
+ Possible values: 'goma', 'ib', ''
+ """
+ if 'goma' in gyp_defines():
+ return 'goma'
+ elif IsWindows():
+ if 'CHROME_HEADLESS' in os.environ:
+ return 'ib' # use (win and !goma and headless) as approximation of ib
+
+
+@memoize()
+def platform():
+ """
+ Returns a string representing the platform this build is targetted for.
+ Possible values: 'win', 'mac', 'linux', 'ios', 'android'
+ """
+ if 'OS' in gyp_defines():
+ if 'android' in gyp_defines()['OS']:
+ return 'android'
+ else:
+ return gyp_defines()['OS']
+ elif IsWindows():
+ return 'win'
+ elif IsLinux():
+ return 'linux'
+ else:
+ return 'mac'
+
+
+@memoize()
+def builder():
+ """
+ Returns a string representing the build engine (not compiler) to use.
+ Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
+ """
+ if 'GYP_GENERATORS' in os.environ:
+ # for simplicity, only support the first explicit generator
+ generator = os.environ['GYP_GENERATORS'].split(',')[0]
+ if generator.endswith('-android'):
+ return generator.split('-')[0]
+ elif generator.endswith('-ninja'):
+ return 'ninja'
+ else:
+ return generator
+ else:
+ if platform() == 'android':
+ # Good enough for now? Do any android bots use make?
+ return 'ninja'
+ elif platform() == 'ios':
+ return 'xcode'
+ elif IsWindows():
+ return 'msvs'
+ elif IsLinux():
+ return 'ninja'
+ elif IsMac():
+ return 'xcode'
+ else:
+ assert False, 'Don\'t know what builder we\'re using!'
diff --git a/build/landmines.py b/build/landmines.py
index c09ffb887d..f1a514c5e7 100755
--- a/build/landmines.py
+++ b/build/landmines.py
@@ -4,9 +4,6 @@
# found in the LICENSE file.
"""
-This file holds a list of reasons why a particular build needs to be clobbered
-(or a list of 'landmines').
-
This script runs every build as a hook. If it detects that the build should
be clobbered, it will touch the file <build_dir>/.landmine_triggered. The
various build scripts will then check for the presence of this file and clobber
@@ -18,148 +15,18 @@ build is clobbered.
"""
import difflib
-import functools
import gyp_helper
import logging
import optparse
import os
-import shlex
import sys
+import subprocess
import time
-SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-
-def memoize(default=None):
- """This decorator caches the return value of a parameterless pure function"""
- def memoizer(func):
- val = []
- @functools.wraps(func)
- def inner():
- if not val:
- ret = func()
- val.append(ret if ret is not None else default)
- if logging.getLogger().isEnabledFor(logging.INFO):
- print '%s -> %r' % (func.__name__, val[0])
- return val[0]
- return inner
- return memoizer
-
-
-@memoize()
-def IsWindows():
- return sys.platform in ['win32', 'cygwin']
-
-
-@memoize()
-def IsLinux():
- return sys.platform.startswith('linux')
-
-
-@memoize()
-def IsMac():
- return sys.platform == 'darwin'
-
-
-@memoize()
-def gyp_defines():
- """Parses and returns GYP_DEFINES env var as a dictionary."""
- return dict(arg.split('=', 1)
- for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
-
-@memoize()
-def gyp_msvs_version():
- return os.environ.get('GYP_MSVS_VERSION', '')
-
-@memoize()
-def distributor():
- """
- Returns a string which is the distributed build engine in use (if any).
- Possible values: 'goma', 'ib', ''
- """
- if 'goma' in gyp_defines():
- return 'goma'
- elif IsWindows():
- if 'CHROME_HEADLESS' in os.environ:
- return 'ib' # use (win and !goma and headless) as approximation of ib
-
-
-@memoize()
-def platform():
- """
- Returns a string representing the platform this build is targetted for.
- Possible values: 'win', 'mac', 'linux', 'ios', 'android'
- """
- if 'OS' in gyp_defines():
- if 'android' in gyp_defines()['OS']:
- return 'android'
- else:
- return gyp_defines()['OS']
- elif IsWindows():
- return 'win'
- elif IsLinux():
- return 'linux'
- else:
- return 'mac'
+import landmine_utils
-@memoize()
-def builder():
- """
- Returns a string representing the build engine (not compiler) to use.
- Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
- """
- if 'GYP_GENERATORS' in os.environ:
- # for simplicity, only support the first explicit generator
- generator = os.environ['GYP_GENERATORS'].split(',')[0]
- if generator.endswith('-android'):
- return generator.split('-')[0]
- elif generator.endswith('-ninja'):
- return 'ninja'
- else:
- return generator
- else:
- if platform() == 'android':
- # Good enough for now? Do any android bots use make?
- return 'ninja'
- elif platform() == 'ios':
- return 'xcode'
- elif IsWindows():
- return 'msvs'
- elif IsLinux():
- return 'ninja'
- elif IsMac():
- return 'xcode'
- else:
- assert False, 'Don\'t know what builder we\'re using!'
-
-
-def get_landmines(target):
- """
- ALL LANDMINES ARE DEFINED HERE.
- target is 'Release' or 'Debug'
- """
- landmines = []
- add = lambda item: landmines.append(item + '\n')
-
- if (distributor() == 'goma' and platform() == 'win32' and
- builder() == 'ninja'):
- add('Need to clobber winja goma due to backend cwd cache fix.')
- if platform() == 'android':
- add('Clobber: Resources removed in r195014 require clobber.')
- if platform() == 'win' and builder() == 'ninja':
- add('Compile on cc_unittests fails due to symbols removed in r185063.')
- if platform() == 'linux' and builder() == 'ninja':
- add('Builders switching from make to ninja will clobber on this.')
- if platform() == 'mac':
- add('Switching from bundle to unbundled dylib (issue 14743002).')
- if (platform() == 'win' and builder() == 'ninja' and
- gyp_msvs_version() == '2012' and
- gyp_defines().get('target_arch') == 'x64' and
- gyp_defines().get('dcheck_always_on') == '1'):
- add("Switched win x64 trybots from VS2010 to VS2012.")
- add('Need to clobber everything due to an IDL change in r154579 (blink)')
-
- return landmines
+SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_target_build_dir(build_tool, target, is_iphone=False):
@@ -187,16 +54,15 @@ def get_target_build_dir(build_tool, target, is_iphone=False):
return os.path.abspath(ret)
-def set_up_landmines(target):
+def set_up_landmines(target, new_landmines):
"""Does the work of setting, planting, and triggering landmines."""
- out_dir = get_target_build_dir(builder(), target, platform() == 'ios')
+ out_dir = get_target_build_dir(landmine_utils.builder(), target,
+ landmine_utils.platform() == 'ios')
landmines_path = os.path.join(out_dir, '.landmines')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
- new_landmines = get_landmines(target)
-
if not os.path.exists(landmines_path):
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
@@ -219,11 +85,17 @@ def set_up_landmines(target):
def main():
parser = optparse.OptionParser()
+ parser.add_option(
+ '-s', '--landmine-scripts', action='append',
+ default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
+ help='Path to the script which emits landmines to stdout. The target '
+ 'is passed to this script via option -t.')
parser.add_option('-v', '--verbose', action='store_true',
default=('LANDMINES_VERBOSE' in os.environ),
help=('Emit some extra debugging information (default off). This option '
'is also enabled by the presence of a LANDMINES_VERBOSE environment '
'variable.'))
+
options, args = parser.parse_args()
if args:
@@ -235,7 +107,13 @@ def main():
gyp_helper.apply_chromium_gyp_env()
for target in ('Debug', 'Release', 'Debug_x64', 'Release_x64'):
- set_up_landmines(target)
+ landmines = []
+ for s in options.landmine_scripts:
+ proc = subprocess.Popen([sys.executable, s, '-t', target],
+ stdout=subprocess.PIPE)
+ output, _ = proc.communicate()
+ landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
+ set_up_landmines(target, landmines)
return 0
diff --git a/build/linux/system.gyp b/build/linux/system.gyp
index 1db278f284..c81bc353b9 100644
--- a/build/linux/system.gyp
+++ b/build/linux/system.gyp
@@ -122,9 +122,6 @@
{
'target_name': 'libgps',
'type': 'static_library',
- 'dependencies': [
- '../../base/base.gyp:base',
- ],
'all_dependent_settings': {
'defines': [
'USE_LIBGPS',
@@ -148,6 +145,9 @@
}],
],
},
+ 'include_dirs': [
+ '../..',
+ ],
'hard_dependency': 1,
'actions': [
{
@@ -202,7 +202,6 @@
['use_openssl==0 and use_system_ssl==0', {
'dependencies': [
'../../net/third_party/nss/ssl.gyp:libssl',
- '../../third_party/zlib/zlib.gyp:zlib',
],
'direct_dependent_settings': {
'include_dirs+': [
@@ -325,9 +324,6 @@
'type': 'static_library',
'conditions': [
['use_gio==1 and _toolset=="target"', {
- 'dependencies': [
- '../../base/base.gyp:base',
- ],
'cflags': [
'<!@(<(pkg-config) --cflags gio-2.0)',
],
@@ -342,6 +338,9 @@
'<(SHARED_INTERMEDIATE_DIR)',
],
},
+ 'include_dirs': [
+ '../..',
+ ],
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other gio-2.0)',
@@ -401,9 +400,6 @@
'cflags': [
'<!@(<(pkg-config) --cflags libpci)',
],
- 'dependencies': [
- '../../base/base.gyp:base',
- ],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)',
@@ -421,6 +417,9 @@
}],
],
},
+ 'include_dirs': [
+ '../..',
+ ],
'hard_dependency': 1,
'actions': [
{
@@ -462,9 +461,6 @@
{
'target_name': 'libspeechd',
'type': 'static_library',
- 'dependencies': [
- '../../base/base.gyp:base',
- ],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)',
@@ -479,6 +475,9 @@
}],
],
},
+ 'include_dirs': [
+ '../..',
+ ],
'hard_dependency': 1,
'actions': [
{
diff --git a/build/linux/unbundle/README b/build/linux/unbundle/README
index 7027b9ad2c..d1b2a966ef 100644
--- a/build/linux/unbundle/README
+++ b/build/linux/unbundle/README
@@ -17,12 +17,28 @@ libraries is the norm.
Usage:
-replace_gyp_files.py <gyp-flags>
+1. remove_bundled_libraries.py <preserved-directories>
-For example: replace_gyp_files.py -Duse_system_harfbuzz=1
+ For example: remove_bundled_libraries.py third_party/mesa
-The script ignores flags other than -D for convenience. This makes it possible
-to have a variable e.g. ${myconf} with all the options, and execute:
+ The script scans sources looking for third_party directories.
+ Everything that is not explicitly preserved is removed (except for
+ gyp files), and the script fails if any directory passed on command
+ line does not exist (to ensure list is kept up to date).
-build/linux/unbundle/replace_gyp_files.py ${myconf}
-build/gyp_chromium ${myconf}
+ This is intended to be used on sources extracted from a tarball,
+ not a repository.
+
+ NOTE: by default this will not remove anything (for safety). Pass
+ --do-remove flag to actually remove files.
+
+2. replace_gyp_files.py <gyp-flags>
+
+ For example: replace_gyp_files.py -Duse_system_harfbuzz=1
+
+ The script ignores flags other than -D for convenience. This makes it
+ possible to have a variable e.g. ${myconf} with all the options, and
+ execute:
+
+ build/linux/unbundle/replace_gyp_files.py ${myconf}
+ build/gyp_chromium ${myconf}
diff --git a/build/linux/unbundle/openssl.gyp b/build/linux/unbundle/openssl.gyp
new file mode 100644
index 0000000000..d832ba7be4
--- /dev/null
+++ b/build/linux/unbundle/openssl.gyp
@@ -0,0 +1,25 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'openssl',
+ 'type': 'none',
+ 'direct_dependent_settings': {
+ 'cflags': [
+ '<!@(pkg-config --cflags openssl)',
+ ],
+ },
+ 'link_settings': {
+ 'ldflags': [
+ '<!@(pkg-config --libs-only-L --libs-only-other openssl)',
+ ],
+ 'libraries': [
+ '<!@(pkg-config --libs-only-l openssl)',
+ ],
+ },
+ }
+ ],
+}
diff --git a/build/linux/unbundle/remove_bundled_libraries.py b/build/linux/unbundle/remove_bundled_libraries.py
new file mode 100755
index 0000000000..09a9c629fd
--- /dev/null
+++ b/build/linux/unbundle/remove_bundled_libraries.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Removes bundled libraries to make sure they are not used.
+
+See README for more details.
+"""
+
+
+import optparse
+import os.path
+import sys
+
+
+def DoMain(argv):
+ my_dirname = os.path.abspath(os.path.dirname(__file__))
+ source_tree_root = os.path.abspath(
+ os.path.join(my_dirname, '..', '..', '..'))
+
+ if os.path.join(source_tree_root, 'build', 'linux', 'unbundle') != my_dirname:
+ print ('Sanity check failed: please run this script from ' +
+ 'build/linux/unbundle directory.')
+ return 1
+
+ parser = optparse.OptionParser()
+ parser.add_option('--do-remove', action='store_true')
+
+ options, args = parser.parse_args(argv)
+
+ exclusion_used = {}
+ for exclusion in args:
+ exclusion_used[exclusion] = False
+
+ for root, dirs, files in os.walk(source_tree_root, topdown=False):
+ # Only look at paths which contain a "third_party" component
+ # (note that e.g. third_party.png doesn't count).
+ root_relpath = os.path.relpath(root, source_tree_root)
+ if 'third_party' not in root_relpath.split(os.sep):
+ continue
+
+ for f in files:
+ path = os.path.join(root, f)
+ relpath = os.path.relpath(path, source_tree_root)
+
+ excluded = False
+ for exclusion in args:
+ if relpath.startswith(exclusion):
+ # Multiple exclusions can match the same path. Go through all of them
+ # and mark each one as used.
+ exclusion_used[exclusion] = True
+ excluded = True
+ if excluded:
+ continue
+
+ # Deleting gyp files almost always leads to gyp failures.
+ # These files come from Chromium project, and can be replaced if needed.
+ if f.endswith('.gyp') or f.endswith('.gypi'):
+ continue
+
+ if options.do_remove:
+ # Delete the file - best way to ensure it's not used during build.
+ os.remove(path)
+ else:
+ # By default just print paths that would be removed.
+ print path
+
+ exit_code = 0
+
+ # Fail if exclusion list contains stale entries - this helps keep it
+ # up to date.
+ for exclusion, used in exclusion_used.iteritems():
+ if not used:
+ print '%s does not exist' % exclusion
+ exit_code = 1
+
+ if not options.do_remove:
+ print ('To actually remove files printed above, please pass ' +
+ '--do-remove flag.')
+
+ return exit_code
+
+
+if __name__ == '__main__':
+ sys.exit(DoMain(sys.argv[1:]))
diff --git a/build/linux/unbundle/replace_gyp_files.py b/build/linux/unbundle/replace_gyp_files.py
index 1436711a8b..c0fcc49f55 100755
--- a/build/linux/unbundle/replace_gyp_files.py
+++ b/build/linux/unbundle/replace_gyp_files.py
@@ -30,6 +30,7 @@ REPLACEMENTS = {
'use_system_libwebp': 'third_party/libwebp/libwebp.gyp',
'use_system_libxml': 'third_party/libxml/libxml.gyp',
'use_system_libxslt': 'third_party/libxslt/libxslt.gyp',
+ 'use_system_openssl': 'third_party/openssl/openssl.gyp',
'use_system_opus': 'third_party/opus/opus.gyp',
'use_system_re2': 'third_party/re2/re2.gyp',
'use_system_snappy': 'third_party/snappy/snappy.gyp',
diff --git a/build/shim_headers.gypi b/build/shim_headers.gypi
index 4291468de1..997a4d0b0f 100644
--- a/build/shim_headers.gypi
+++ b/build/shim_headers.gypi
@@ -17,12 +17,19 @@
'<(shim_headers_path)',
],
'all_dependent_settings': {
+ # Repeating this with different numbers of plusses is unfortunately required
+ # to make sure that even if this include is inside nested conditions/etc, it
+ # still gets inserted at the beginning of the include_dirs list. See
+ # http://crbug.com/263818 for details.
'include_dirs+++': [
'<(shim_headers_path)',
],
'include_dirs++++': [
'<(shim_headers_path)',
],
+ 'include_dirs+++++': [
+ '<(shim_headers_path)',
+ ],
},
'actions': [
{
diff --git a/build/slave/OWNERS b/build/slave/OWNERS
new file mode 100644
index 0000000000..c367f57497
--- /dev/null
+++ b/build/slave/OWNERS
@@ -0,0 +1,24 @@
+set noparent
+agable@chromium.org
+agable@google.com
+bevc@chromium.org
+bevc@google.com
+cmp@chromium.org
+cmp@google.com
+dpranke@chromium.org
+iannucci@chromium.org
+iannucci@google.com
+ilevy@chromium.org
+ilevy@google.com
+johnw@chromium.org
+johnw@google.com
+maruel@chromium.org
+maruel@google.com
+mmoss@chromium.org
+mmoss@google.com
+pschmidt@chromium.org
+pschmidt@google.com
+szager@chromium.org
+szager@google.com
+xusydoc@chromium.org
+xusydoc@google.com
diff --git a/build/slave/README b/build/slave/README
new file mode 100644
index 0000000000..e3718b2c28
--- /dev/null
+++ b/build/slave/README
@@ -0,0 +1,8 @@
+This is a directory which contains configuration information for the
+buildsystem.
+
+* Under recipes, the buildsystem should use only this directory as an
+ entry point into src/.
+
+* Scripts in this directory must not import from outside this directory or shell
+ to scripts outside this directory.
diff --git a/build/util/LASTCHANGE b/build/util/LASTCHANGE
index 0ef7af5e43..9422be6bbb 100644
--- a/build/util/LASTCHANGE
+++ b/build/util/LASTCHANGE
@@ -1 +1 @@
-LASTCHANGE=217147
+LASTCHANGE=219274
diff --git a/build/util/LASTCHANGE.blink b/build/util/LASTCHANGE.blink
index 2b4cc5ae4d..d36e8c468f 100644
--- a/build/util/LASTCHANGE.blink
+++ b/build/util/LASTCHANGE.blink
@@ -1 +1 @@
-LASTCHANGE=155942
+LASTCHANGE=156598
diff --git a/build/whitespace_file.txt b/build/whitespace_file.txt
index 405a489430..79e805da0f 100644
--- a/build/whitespace_file.txt
+++ b/build/whitespace_file.txt
@@ -71,3 +71,4 @@ A RELAXED MAN IS NOT NECESSARILY A BETTER MAN
NO ONE SHOULD EVER USE SVN
AN INFLEXIBLE POSITION SOMETIMES IS A SIGN OF PARALYSIS
IT IS MANS FATE TO OUTSMART HIMSELF
+BEING SURE OF YOURSELF MEANS YOU'RE A FOOL