Diffstat (limited to 'tools')
-rw-r--r--  tools/OWNERS | 1
-rwxr-xr-x  tools/bash-completion.sh | 8
-rw-r--r--  tools/blink_tests/TestExpectations | 28
-rw-r--r--  tools/cfi/blacklist.txt | 4
-rwxr-xr-x  tools/check-inline-includes.sh | 19
-rwxr-xr-x  tools/check-name-clashes.py | 119
-rw-r--r--  tools/check-static-initializers.gyp | 26
-rw-r--r--  tools/check-static-initializers.isolate | 16
-rwxr-xr-x  tools/check-unused-bailouts.sh | 16
m---------  tools/clang | 0
-rw-r--r--  tools/compare-table-gen.js | 120
-rwxr-xr-x  tools/cpu.sh | 39
-rw-r--r--  tools/disasm.py | 4
-rwxr-xr-x  tools/eval_gc_nvp.py | 176
-rwxr-xr-x  tools/eval_gc_time.sh | 107
-rw-r--r--  tools/external-reference-check.py | 3
-rwxr-xr-x  tools/fuzz-harness.sh | 4
-rwxr-xr-x  tools/gc-nvp-to-csv.py | 30
-rwxr-xr-x  tools/gc-nvp-trace-processor.py | 40
-rw-r--r--  tools/gc_nvp_common.py | 32
-rw-r--r--  tools/gcmole/gcmole.lua | 80
-rwxr-xr-x  tools/gcmole/parallel.py | 44
-rw-r--r--  tools/gdb-v8-support.py | 32
-rw-r--r--  tools/gdbinit | 11
-rw-r--r--  tools/gen-postmortem-metadata.py | 104
-rwxr-xr-x  tools/grokdump.py | 258
-rw-r--r--  tools/gyp/v8.gyp | 1140
-rw-r--r--  tools/isolate_driver.py | 65
-rwxr-xr-x  tools/js2c.py | 136
-rw-r--r--  tools/jsmin.py | 18
-rwxr-xr-x  tools/ll_prof.py | 57
-rw-r--r--  tools/logreader.js | 53
-rw-r--r--  tools/luci-go/linux64/isolate.sha1 | 1
-rw-r--r--  tools/luci-go/mac64/isolate.sha1 | 1
-rw-r--r--  tools/luci-go/win64/isolate.exe.sha1 | 1
-rw-r--r--  tools/ninja/ninja_output.py | 44
-rw-r--r--  tools/oom_dump/oom_dump.cc | 42
-rw-r--r--  tools/parser-shell.cc | 75
-rw-r--r--  tools/parser-shell.gyp | 4
-rwxr-xr-x  tools/perf-to-html.py | 378
-rwxr-xr-x  tools/presubmit.py | 187
-rw-r--r--  tools/profile.js | 28
-rwxr-xr-x  tools/push-to-trunk/auto_roll.py | 142
-rwxr-xr-x  tools/push-to-trunk/bump_up_version.py | 247
-rwxr-xr-x  tools/push-to-trunk/chromium_roll.py | 155
-rwxr-xr-x  tools/push-to-trunk/generate_version.py | 78
-rwxr-xr-x  tools/release/auto_push.py (renamed from tools/push-to-trunk/auto_push.py) | 78
-rwxr-xr-x  tools/release/auto_roll.py | 229
-rwxr-xr-x  tools/release/auto_tag.py (renamed from tools/push-to-trunk/auto_tag.py) | 0
-rwxr-xr-x  tools/release/check_clusterfuzz.py (renamed from tools/push-to-trunk/check_clusterfuzz.py) | 36
-rw-r--r--  tools/release/common_includes.py (renamed from tools/push-to-trunk/common_includes.py) | 246
-rwxr-xr-x  tools/release/create_release.py | 299
-rw-r--r--  tools/release/git_recipes.py (renamed from tools/push-to-trunk/git_recipes.py) | 16
-rwxr-xr-x  tools/release/merge_to_branch.py (renamed from tools/push-to-trunk/merge_to_branch.py) | 56
-rwxr-xr-x  tools/release/mergeinfo.py | 113
-rwxr-xr-x  tools/release/push_to_candidates.py (renamed from tools/push-to-trunk/push_to_trunk.py) | 208
-rwxr-xr-x  tools/release/releases.py (renamed from tools/push-to-trunk/releases.py) | 316
-rwxr-xr-x  tools/release/script_test.py (renamed from tools/push-to-trunk/script_test.py) | 0
-rwxr-xr-x  tools/release/search_related_commits.py | 218
-rwxr-xr-x  tools/release/test_mergeinfo.py | 180
-rw-r--r--  tools/release/test_scripts.py (renamed from tools/push-to-trunk/test_scripts.py) | 877
-rwxr-xr-x  tools/release/test_search_related_commits.py | 274
-rwxr-xr-x  tools/run-deopt-fuzzer.py | 16
-rwxr-xr-x  tools/run-tests.py | 392
-rwxr-xr-x  tools/run-valgrind.py | 35
-rwxr-xr-x  tools/run_perf.py | 582
-rw-r--r--  tools/shell-utils.h | 7
m---------  tools/swarming_client | 0
-rwxr-xr-x  tools/test-push-to-trunk.sh | 246
-rw-r--r--  tools/testrunner/local/commands.py | 135
-rw-r--r--  tools/testrunner/local/execution.py | 255
-rw-r--r--  tools/testrunner/local/perfdata.py | 26
-rw-r--r--  tools/testrunner/local/pool.py | 61
-rw-r--r--  tools/testrunner/local/pool_unittest.py | 10
-rw-r--r--  tools/testrunner/local/progress.py | 128
-rw-r--r--  tools/testrunner/local/statusfile.py | 49
-rw-r--r--  tools/testrunner/local/testsuite.py | 119
-rw-r--r--  tools/testrunner/local/utils.py | 15
-rw-r--r--  tools/testrunner/network/endpoint.py | 1
-rw-r--r--  tools/testrunner/network/network_execution.py | 4
-rw-r--r--  tools/testrunner/objects/context.py | 10
-rw-r--r--  tools/testrunner/objects/testcase.py | 27
-rw-r--r--  tools/testrunner/testrunner.isolate | 14
-rw-r--r--  tools/tickprocessor-driver.js | 5
-rw-r--r--  tools/tickprocessor.js | 100
-rwxr-xr-x  tools/try_perf.py | 104
-rw-r--r--  tools/unittests/run_perf_test.py | 71
-rwxr-xr-x  tools/v8-info.sh | 10
-rw-r--r--  tools/v8heapconst.py | 444
-rwxr-xr-x  tools/verify_source_deps.py | 106
-rw-r--r--  tools/vim/ninja-build.vim | 119
-rw-r--r--  tools/whitespace.txt | 4
92 files changed, 7027 insertions, 3387 deletions
diff --git a/tools/OWNERS b/tools/OWNERS
new file mode 100644
index 00000000..0e56af31
--- /dev/null
+++ b/tools/OWNERS
@@ -0,0 +1 @@
+machenbach@chromium.org
diff --git a/tools/bash-completion.sh b/tools/bash-completion.sh
index 6e324246..5b9f7f50 100755
--- a/tools/bash-completion.sh
+++ b/tools/bash-completion.sh
@@ -39,12 +39,16 @@ _v8_flag() {
cur="${COMP_WORDS[COMP_CWORD]}"
defines=$(cat $v8_source/src/flag-definitions.h \
| grep "^DEFINE" \
- | grep -v "DEFINE_implication" \
+ | grep -v "DEFINE_IMPLICATION" \
+ | sed -e 's/_/-/g'; \
+ cat $v8_source/src/flag-definitions.h \
+ | grep "^ V(harmony_" \
+ | sed -e 's/^ V/DEFINE-BOOL/' \
| sed -e 's/_/-/g')
targets=$(echo "$defines" \
| sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
echo "$defines" \
- | sed -ne 's/^DEFINE-bool(\([^,]*\).*/--no\1/p'; \
+ | sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p'; \
cat $v8_source/src/d8.cc \
| grep "strcmp(argv\[i\]" \
| sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
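
The pipeline above mines completion targets from V8's flag macros: underscores are rewritten to hyphens (the hyphenated spellings, which V8 also accepts, are what the completion offers), every flag yields a --name target, and boolean flags additionally yield a --noname variant. A minimal Python sketch of the same transformation, using invented macro lines rather than the real flag-definitions.h:

import re

# Invented sample lines; the real input is src/flag-definitions.h.
defines = [
    'DEFINE_BOOL(use_ic, true, "use inline caching")',
    'DEFINE_INT(max_inlined_nodes, 196, "maximum number of AST nodes")',
]

targets = []
for line in defines:
    line = line.replace("_", "-")               # mirrors sed -e 's/_/-/g'
    m = re.match(r"DEFINE-[^(]*\(([^,]*)", line)
    if m:
        targets.append("--" + m.group(1))       # every flag: --use-ic
    m = re.match(r"DEFINE-BOOL\(([^,]*)", line)
    if m:
        targets.append("--no" + m.group(1))     # booleans only: --nouse-ic

print(" ".join(targets))  # --use-ic --nouse-ic --max-inlined-nodes
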
diff --git a/tools/blink_tests/TestExpectations b/tools/blink_tests/TestExpectations
index 728906f4..3655c5c9 100644
--- a/tools/blink_tests/TestExpectations
+++ b/tools/blink_tests/TestExpectations
@@ -1,27 +1,5 @@
-# Tests that sometimes fail only on the V8 waterfall:
-[ Linux Release x86 ] fast/text/atsui-multiple-renderers.html [ Pass Failure Slow ]
-[ Linux Release x86 ] fast/text/international/complex-joining-using-gpos.html [ Pass Failure Slow ]
-[ Linux Release x86 ] fast/text/international/danda-space.html [ Pass Failure Slow ]
-[ Linux Release x86 ] fast/text/international/thai-baht-space.html [ Pass Failure Slow ]
-[ Linux Release x86 ] fast/text/international/thai-line-breaks.html [ Pass Failure Slow ]
-[ Linux Release x86 ] inspector/profiler/memory-instrumentation-external-array.html [ Pass Failure Slow ]
-[ Linux Release x86_64 ] fast/text/atsui-multiple-renderers.html [ Pass Failure Slow ]
-[ Linux Release x86_64 ] fast/text/international/complex-joining-using-gpos.html [ Pass Failure Slow ]
-[ Linux Release x86_64 ] fast/text/international/danda-space.html [ Pass Failure Slow ]
-[ Linux Release x86_64 ] fast/text/international/thai-baht-space.html [ Pass Failure Slow ]
-[ Linux Release x86_64 ] fast/text/international/thai-line-breaks.html [ Pass Failure Slow ]
-[ Linux Release x86_64 ] inspector/profiler/memory-instrumentation-external-array.html [ Pass Failure Slow ]
-[ Linux Debug ] fast/text/atsui-multiple-renderers.html [ Pass Failure Slow ]
-[ Linux Debug ] fast/text/international/complex-joining-using-gpos.html [ Pass Failure Slow ]
-[ Linux Debug ] fast/text/international/danda-space.html [ Pass Failure Slow ]
-[ Linux Debug ] fast/text/international/thai-baht-space.html [ Pass Failure Slow ]
-[ Linux Debug ] fast/text/international/thai-line-breaks.html [ Pass Failure Slow ]
-crbug.com/108833 [ Win Debug ] plugins/geturlnotify-during-document-teardown.html [ Crash Failure Timeout ]
-webkit.org/b/48655 [ Win ] plugins/js-from-destroy.html [ Crash Timeout ]
-crbug.com/178745 [ Win Debug ] plugins/open-and-close-window-with-plugin.html [ Crash Failure Timeout ]
+[ Linux ] virtual/pointerevent/fast/events/mouse-cursor-style-change-iframe.html [ Skip ]
-# Slow on the trunk builder:
-[ Linux Debug ] fast/js/regress/function-dot-apply.html [ Slow ]
-crbug.com/249894 [ Linux Debug ] fast/js/regress/inline-arguments-access.html [ Pass Failure Crash Slow ]
-[ Linux Debug ] fast/js/regress/inline-arguments-local-escape.html [ Slow ]
+# Turn off Slimming Paint tests on linux.
+[ Linux ] virtual/slimmingpaint/ [ Skip ]
diff --git a/tools/cfi/blacklist.txt b/tools/cfi/blacklist.txt
new file mode 100644
index 00000000..0ad565ea
--- /dev/null
+++ b/tools/cfi/blacklist.txt
@@ -0,0 +1,4 @@
+# All std:: types
+# This should be possible to remove, if/when we build against
+# a statically linked libc++.
+type:std::*
\ No newline at end of file
diff --git a/tools/check-inline-includes.sh b/tools/check-inline-includes.sh
new file mode 100755
index 00000000..536afb1d
--- /dev/null
+++ b/tools/check-inline-includes.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+headers=$(find "$v8_root/src" -name '*.h' -not -name '*-inl.h')
+
+for header in $headers; do
+ inline_header_include=$(grep '#include ".*-inl.h"' "$header")
+ if [ -n "$inline_header_include" ]; then
+ echo "The following non-inline header seems to include an inline header:"
+ echo " Header : $header"
+ echo " Include: $inline_header_include"
+ echo
+ fi
+done
+
+echo "Kthxbye."
diff --git a/tools/check-name-clashes.py b/tools/check-name-clashes.py
deleted file mode 100755
index 89a7dee7..00000000
--- a/tools/check-name-clashes.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import js2c
-import os
-import re
-import sys
-
-FILENAME = "src/runtime/runtime.h"
-LISTHEAD = re.compile(r"#define\s+(\w+LIST\w*)\((\w+)\)")
-LISTBODY = re.compile(r".*\\$")
-BLACKLIST = ['INLINE_FUNCTION_LIST']
-
-
-class Function(object):
- def __init__(self, match):
- self.name = match.group(1).strip()
-
-def ListMacroRe(list):
- macro = LISTHEAD.match(list[0]).group(2)
- re_string = "\s*%s\((\w+)" % macro
- return re.compile(re_string)
-
-
-def FindLists(filename):
- lists = []
- current_list = []
- mode = "SEARCHING"
- with open(filename, "r") as f:
- for line in f:
- if mode == "SEARCHING":
- match = LISTHEAD.match(line)
- if match and match.group(1) not in BLACKLIST:
- mode = "APPENDING"
- current_list.append(line)
- else:
- current_list.append(line)
- match = LISTBODY.match(line)
- if not match:
- mode = "SEARCHING"
- lists.append(current_list)
- current_list = []
- return lists
-
-
-# Detects runtime functions by parsing FILENAME.
-def FindRuntimeFunctions():
- functions = []
- lists = FindLists(FILENAME)
- for list in lists:
- function_re = ListMacroRe(list)
- for line in list:
- match = function_re.match(line)
- if match:
- functions.append(Function(match))
- return functions
-
-
-class Builtin(object):
- def __init__(self, match):
- self.name = match.group(1)
-
-
-def FindJSNatives():
- PATH = "src"
- fileslist = []
- for (root, dirs, files) in os.walk(PATH):
- for f in files:
- if f.endswith(".js"):
- fileslist.append(os.path.join(root, f))
- natives = []
- regexp = re.compile("^function (\w+)\s*\((.*?)\) {")
- matches = 0
- for filename in fileslist:
- with open(filename, "r") as f:
- file_contents = f.read()
- file_contents = js2c.ExpandInlineMacros(file_contents)
- lines = file_contents.split("\n")
- partial_line = ""
- for line in lines:
- if line.startswith("function") and not '{' in line:
- partial_line += line.rstrip()
- continue
- if partial_line:
- partial_line += " " + line.strip()
- if '{' in line:
- line = partial_line
- partial_line = ""
- else:
- continue
- match = regexp.match(line)
- if match:
- natives.append(Builtin(match))
- return natives
-
-
-def Main():
- functions = FindRuntimeFunctions()
- natives = FindJSNatives()
- errors = 0
- runtime_map = {}
- for f in functions:
- runtime_map[f.name] = 1
- for b in natives:
- if b.name in runtime_map:
- print("JS_Native/Runtime_Function name clash: %s" % b.name)
- errors += 1
-
- if errors > 0:
- return 1
- print("Runtime/Natives name clashes: checked %d/%d functions, all good." %
- (len(functions), len(natives)))
- return 0
-
-
-if __name__ == "__main__":
- sys.exit(Main())
diff --git a/tools/check-static-initializers.gyp b/tools/check-static-initializers.gyp
new file mode 100644
index 00000000..547a6c87
--- /dev/null
+++ b/tools/check-static-initializers.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'check_static_initializers_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'check-static-initializers.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/tools/check-static-initializers.isolate b/tools/check-static-initializers.isolate
new file mode 100644
index 00000000..d1197d3d
--- /dev/null
+++ b/tools/check-static-initializers.isolate
@@ -0,0 +1,16 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ 'check-static-initializers.sh',
+ ],
+ 'files': [
+ 'check-static-initializers.sh',
+ ],
+ },
+ 'includes': [
+ '../src/d8.isolate',
+ ],
+}
diff --git a/tools/check-unused-bailouts.sh b/tools/check-unused-bailouts.sh
new file mode 100755
index 00000000..da4d4a7f
--- /dev/null
+++ b/tools/check-unused-bailouts.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+bailouts=$(grep -oP 'V\(\K(k[^,]*)' "$v8_root/src/bailout-reason.h")
+
+for bailout in $bailouts; do
+ bailout_uses=$(grep -r $bailout "$v8_root/src" "$v8_root/test/cctest" | wc -l)
+ if [ $bailout_uses -eq "1" ]; then
+ echo "Bailout reason \"$bailout\" seems to be unused."
+ fi
+done
+
+echo "Kthxbye."
diff --git a/tools/clang b/tools/clang
new file mode 160000
+Subproject a00149535c011c08b6e8cc583a1f10f38d3cdaf
diff --git a/tools/compare-table-gen.js b/tools/compare-table-gen.js
new file mode 100644
index 00000000..e0c870da
--- /dev/null
+++ b/tools/compare-table-gen.js
@@ -0,0 +1,120 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generates a comparison table test case.
+// Usage: d8 compare-table-gen.js -- lt|lteq|gt|gteq|eq|ne|seq|sne|min|max
+
+var strings = ["true", "false", "null", "void 0", "0", "0.0", "-0", "\"\"", "-1", "-1.25", "1", "1.25", "-2147483648", "2147483648", "Infinity", "-Infinity", "NaN"];
+var values = new Array(strings.length);
+for (var i = 0; i < strings.length; i++) {
+ values[i] = eval(strings[i]);
+}
+
+function test() {
+ for (var i = 0; i < values.length; i++) {
+ for (var j = 0; j < values.length; j++) {
+ var a = values[i];
+ var b = values[j];
+ var x = expected[i][j];
+ assertEquals(x, func(a,b));
+ assertEquals(x, left_funcs[i](b));
+ assertEquals(x, right_funcs[j](a));
+ }
+ }
+
+ var result = matrix();
+ for (var i = 0; i < values.length; i++) {
+ for (var j = 0; j < values.length; j++) {
+ assertEquals(expected[i][j], result[i][j]);
+ }
+ }
+}
+
+function expr(infix, a, cmp, b) {
+ return infix ? a + " " + cmp + " " + b : cmp + "(" + a + ", " + b + ")";
+}
+
+function SpecialToString(x) {
+ if ((1 / x) == -Infinity) return "-0";
+ return "" + x;
+}
+
+function gen(name, cmp, infix) {
+
+ print("// Copyright 2015 the V8 project authors. All rights reserved.");
+ print("// Use of this source code is governed by a BSD-style license that can be");
+ print("// found in the LICENSE file.");
+ print();
+ print("var values = [" + strings + "];");
+
+ var body = "(function " + name + "(a,b) { return " + expr(infix, "a", cmp, "b") + "; })";
+ var func = eval(body);
+
+ print("var expected = [");
+
+ for (var i = 0; i < values.length; i++) {
+ var line = " [";
+ for (var j = 0; j < values.length; j++) {
+ if (j > 0) line += ",";
+ line += SpecialToString(func(values[i], values[j]));
+ }
+ line += "]";
+ if (i < (values.length - 1)) line += ",";
+ print(line);
+ }
+ print("];");
+
+ print("var func = " + body + ";");
+ print("var left_funcs = [");
+
+ for (var i = 0; i < values.length; i++) {
+ var value = strings[i];
+ var body = "(function " + name + "_L" + i + "(b) { return " + expr(infix, value, cmp, "b") + "; })";
+ var end = i < (values.length - 1) ? "," : "";
+ print(" " + body + end);
+ }
+ print("];");
+
+ print("var right_funcs = [");
+ for (var i = 0; i < values.length; i++) {
+ var value = strings[i];
+ var body = "(function " + name + "_R" + i + "(a) { return " + expr(infix, "a", cmp, value) + "; })";
+ var end = i < (values.length - 1) ? "," : "";
+ print(" " + body + end);
+ }
+ print("];");
+
+ print("function matrix() {");
+ print(" return [");
+ for (var i = 0; i < values.length; i++) {
+ var line = " [";
+ for (var j = 0; j < values.length; j++) {
+ if (j > 0) line += ",";
+ line += expr(infix, strings[i], cmp, strings[j]);
+ }
+ line += "]";
+ if (i < (values.length - 1)) line += ",";
+ print(line);
+ }
+ print(" ];");
+ print("}");
+
+
+ print(test.toString());
+ print("test();");
+ print("test();");
+}
+
+switch (arguments[0]) {
+ case "lt": gen("lt", "<", true); break;
+ case "lteq": gen("lteq", "<=", true); break;
+ case "gt": gen("gt", ">", true); break;
+ case "gteq": gen("gteq", ">=", true); break;
+ case "eq": gen("eq", "==", true); break;
+ case "ne": gen("ne", "!=", true); break;
+ case "seq": gen("seq", "===", true); break;
+ case "sne": gen("sne", "!==", true); break;
+ case "min": gen("min", "Math.min", false); break;
+ case "max": gen("max", "Math.max", false); break;
+}
diff --git a/tools/cpu.sh b/tools/cpu.sh
index 8e8a243c..5634cac9 100755
--- a/tools/cpu.sh
+++ b/tools/cpu.sh
@@ -14,26 +14,38 @@ set_governor() {
done
}
+enable_cores() {
+ # $1: How many cores to enable.
+ for (( i=1; i<=$MAXID; i++ )); do
+ if [ "$i" -lt "$1" ]; then
+ echo 1 > $CPUPATH/cpu$i/online
+ else
+ echo 0 > $CPUPATH/cpu$i/online
+ fi
+ done
+}
+
dual_core() {
echo "Switching to dual-core mode"
- for (( i=2; i<=$MAXID; i++ )); do
- echo 0 > $CPUPATH/cpu$i/online
- done
+ enable_cores 2
}
single_core() {
echo "Switching to single-core mode"
- for (( i=1; i<=$MAXID; i++ )); do
- echo 0 > $CPUPATH/cpu$i/online
- done
+ enable_cores 1
}
all_cores() {
echo "Reactivating all CPU cores"
- for (( i=2; i<=$MAXID; i++ )); do
- echo 1 > $CPUPATH/cpu$i/online
- done
+ enable_cores $((MAXID+1))
+}
+
+
+limit_cores() {
+ # $1: How many cores to enable.
+ echo "Limiting to $1 cores"
+ enable_cores $1
}
case "$1" in
@@ -55,8 +67,15 @@ case "$1" in
allcores | all)
all_cores
;;
+ limit_cores)
+ if [ $# -ne 2 ]; then
+ echo "Usage $0 limit_cores <num>"
+ exit 1
+ fi
+ limit_cores $2
+ ;;
*)
- echo "Usage: $0 fast|slow|default|singlecore|dualcore|all"
+ echo "Usage: $0 fast|slow|default|singlecore|dualcore|all|limit_cores"
exit 1
;;
esac
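
The refactoring folds three near-identical loops into one enable_cores helper, and in passing fixes all_cores, which previously started its loop at cpu2 and so never re-enabled cpu1 after single-core mode. cpu0 stays untouched because Linux typically does not expose an online switch for it. A hedged Python sketch of the same sysfs loop (MAXID is an assumption here; the script derives it from the cpu directories, and writing these files requires root):

CPUPATH = "/sys/devices/system/cpu"
MAXID = 7  # assumption: an 8-core machine (cpu0..cpu7)

def enable_cores(n):
    # cpu0 cannot be taken offline, so only cpu1..cpuMAXID are toggled.
    for i in range(1, MAXID + 1):
        with open("%s/cpu%d/online" % (CPUPATH, i), "w") as f:
            f.write("1" if i < n else "0")

# enable_cores(2)          -> dual-core
# enable_cores(1)          -> single-core
# enable_cores(MAXID + 1)  -> all cores
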
diff --git a/tools/disasm.py b/tools/disasm.py
index cc7ef062..f409cb00 100644
--- a/tools/disasm.py
+++ b/tools/disasm.py
@@ -60,7 +60,9 @@ def GetDisasmLines(filename, offset, size, arch, inplace, arch_flags=""):
# Create a temporary file containing a copy of the code.
assert arch in _ARCH_MAP, "Unsupported architecture '%s'" % arch
arch_flags = arch_flags + " " + _ARCH_MAP[arch]
- tmp_name = tempfile.mktemp(".v8code")
+ tmp_file = tempfile.NamedTemporaryFile(prefix=".v8code", delete=False)
+ tmp_name = tmp_file.name
+ tmp_file.close()
command = "dd if=%s of=%s bs=1 count=%d skip=%d && " \
"%s %s -D -b binary %s %s" % (
filename, tmp_name, size, offset,
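
The fix here replaces tempfile.mktemp(), which merely invents a free filename and is therefore open to a race where another process creates that file first, with NamedTemporaryFile(delete=False), which atomically creates the file before its name is handed out. The pattern in isolation, assuming the caller deletes the file when done (as GetDisasmLines does after running objdump):

import os
import tempfile

# Racy (old): only a name is returned; anyone can create the file first.
# tmp_name = tempfile.mktemp(".v8code")

# Safe (new): the file exists, with a unique name, before it is used.
tmp_file = tempfile.NamedTemporaryFile(prefix=".v8code", delete=False)
tmp_name = tmp_file.name
tmp_file.close()
try:
    with open(tmp_name, "wb") as f:   # stand-in for the dd/objdump pipeline
        f.write(b"\x90" * 16)
finally:
    os.unlink(tmp_name)
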
diff --git a/tools/eval_gc_nvp.py b/tools/eval_gc_nvp.py
new file mode 100755
index 00000000..f18a5793
--- /dev/null
+++ b/tools/eval_gc_nvp.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This script is used to analyze GCTracer's NVP output."""
+
+
+from argparse import ArgumentParser
+from copy import deepcopy
+from gc_nvp_common import split_nvp
+from math import log
+from sys import stdin
+
+
+class LinearBucket:
+ def __init__(self, granularity):
+ self.granularity = granularity
+
+ def value_to_bucket(self, value):
+ return int(value / self.granularity)
+
+ def bucket_to_range(self, bucket):
+ return (bucket * self.granularity, (bucket + 1) * self.granularity)
+
+
+class Log2Bucket:
+ def __init__(self, start):
+ self.start = int(log(start, 2)) - 1
+
+ def value_to_bucket(self, value):
+ index = int(log(value, 2))
+ index -= self.start
+ if index < 0:
+ index = 0
+ return index
+
+ def bucket_to_range(self, bucket):
+ if bucket == 0:
+ return (0, 2 ** (self.start + 1))
+ bucket += self.start
+ return (2 ** bucket, 2 ** (bucket + 1))
+
+
+class Histogram:
+ def __init__(self, bucket_trait, fill_empty):
+ self.histogram = {}
+ self.fill_empty = fill_empty
+ self.bucket_trait = bucket_trait
+
+ def add(self, key):
+ index = self.bucket_trait.value_to_bucket(key)
+ if index not in self.histogram:
+ self.histogram[index] = 0
+ self.histogram[index] += 1
+
+ def __str__(self):
+ ret = []
+ keys = self.histogram.keys()
+ keys.sort()
+ last = keys[len(keys) - 1]
+ for i in range(0, last + 1):
+ (min_value, max_value) = self.bucket_trait.bucket_to_range(i)
+ if i == keys[0]:
+ keys.pop(0)
+ ret.append(" [{0},{1}[: {2}".format(
+ str(min_value), str(max_value), self.histogram[i]))
+ else:
+ if self.fill_empty:
+ ret.append(" [{0},{1}[: {2}".format(
+ str(min_value), str(max_value), 0))
+ return "\n".join(ret)
+
+
+class Category:
+ def __init__(self, key, histogram):
+ self.key = key
+ self.values = []
+ self.histogram = histogram
+
+ def process_entry(self, entry):
+ if self.key in entry:
+ self.values.append(float(entry[self.key]))
+ if self.histogram:
+ self.histogram.add(float(entry[self.key]))
+
+ def min(self):
+ return min(self.values)
+
+ def max(self):
+ return max(self.values)
+
+ def avg(self):
+ return sum(self.values) / len(self.values)
+
+ def __str__(self):
+ ret = [self.key]
+ ret.append(" len: {0}".format(len(self.values)))
+ if len(self.values) > 0:
+ ret.append(" min: {0}".format(min(self.values)))
+ ret.append(" max: {0}".format(max(self.values)))
+ ret.append(" avg: {0}".format(sum(self.values) / len(self.values)))
+ if self.histogram:
+ ret.append(str(self.histogram))
+ return "\n".join(ret)
+
+ def __repr__(self):
+ return "<Category: {0}>".format(self.key)
+
+
+def make_key_func(cmp_metric):
+ def key_func(a):
+ return getattr(a, cmp_metric)()
+ return key_func
+
+
+def main():
+ parser = ArgumentParser(description="Process GCTracer's NVP output")
+ parser.add_argument('keys', metavar='KEY', type=str, nargs='+',
+ help='the keys of NVPs to process')
+ parser.add_argument('--histogram-type', metavar='<linear|log2>',
+ type=str, nargs='?', default="linear",
+ help='histogram type to use (default: linear)')
+ linear_group = parser.add_argument_group('linear histogram specific')
+ linear_group.add_argument('--linear-histogram-granularity',
+ metavar='GRANULARITY', type=int, nargs='?',
+ default=5,
+ help='histogram granularity (default: 5)')
+ log2_group = parser.add_argument_group('log2 histogram specific')
+ log2_group.add_argument('--log2-histogram-init-bucket', metavar='START',
+ type=int, nargs='?', default=64,
+                          help='initial bucket size (default: 64)')
+ parser.add_argument('--histogram-omit-empty-buckets',
+ dest='histogram_omit_empty',
+ action='store_true',
+ help='omit empty histogram buckets')
+ parser.add_argument('--no-histogram', dest='histogram',
+ action='store_false', help='do not print histogram')
+ parser.set_defaults(histogram=True)
+ parser.set_defaults(histogram_omit_empty=False)
+ parser.add_argument('--rank', metavar='<no|min|max|avg>',
+ type=str, nargs='?',
+ default="no",
+ help="rank keys by metric (default: no)")
+ args = parser.parse_args()
+
+ histogram = None
+ if args.histogram:
+ bucket_trait = None
+ if args.histogram_type == "log2":
+ bucket_trait = Log2Bucket(args.log2_histogram_init_bucket)
+ else:
+ bucket_trait = LinearBucket(args.linear_histogram_granularity)
+ histogram = Histogram(bucket_trait, not args.histogram_omit_empty)
+
+ categories = [ Category(key, deepcopy(histogram))
+ for key in args.keys ]
+
+ while True:
+ line = stdin.readline()
+ if not line:
+ break
+ obj = split_nvp(line)
+ for category in categories:
+ category.process_entry(obj)
+
+ if args.rank != "no":
+ categories = sorted(categories, key=make_key_func(args.rank), reverse=True)
+
+ for category in categories:
+ print(category)
+
+
+if __name__ == '__main__':
+ main()
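
For the log2 histogram, Log2Bucket(start) sets self.start = int(log(start, 2)) - 1, so with the default of 64 the first bucket absorbs everything below 64 and each subsequent bucket spans one power of two. A quick standalone check of that mapping (logic copied from the class above):

from math import log

b_start = int(log(64, 2)) - 1          # 5, as in Log2Bucket(64)

def value_to_bucket(value):
    return max(int(log(value, 2)) - b_start, 0)

def bucket_to_range(bucket):
    if bucket == 0:
        return (0, 2 ** (b_start + 1))  # everything below 64
    return (2 ** (bucket + b_start), 2 ** (bucket + b_start + 1))

for v in [1, 63, 64, 100, 500]:
    print("%d -> %s" % (v, bucket_to_range(value_to_bucket(v))))
# 1 -> (0, 64), 63 -> (0, 64), 64 -> (64, 128),
# 100 -> (64, 128), 500 -> (256, 512)
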
diff --git a/tools/eval_gc_time.sh b/tools/eval_gc_time.sh
new file mode 100755
index 00000000..92246d38
--- /dev/null
+++ b/tools/eval_gc_time.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Convenience script used to rank GC NVP output.
+
+print_usage_and_die() {
+ echo "Usage: $0 new-gen-rank|old-gen-rank max|avg logfile"
+ exit 1
+}
+
+if [ $# -ne 3 ]; then
+ print_usage_and_die
+fi
+
+case $1 in
+ new-gen-rank|old-gen-rank)
+ OP=$1
+ ;;
+ *)
+ print_usage_and_die
+esac
+
+case $2 in
+ max|avg)
+ RANK_MODE=$2
+ ;;
+ *)
+ print_usage_and_die
+esac
+
+LOGFILE=$3
+
+GENERAL_INTERESTING_KEYS="\
+ pause \
+"
+
+INTERESTING_NEW_GEN_KEYS="\
+ ${GENERAL_INTERESTING_KEYS} \
+ scavenge \
+ weak \
+ roots \
+ old_new \
+ code \
+ semispace \
+ object_groups \
+"
+
+INTERESTING_OLD_GEN_KEYS="\
+ ${GENERAL_INTERESTING_KEYS} \
+ external \
+ clear \
+ clear.code_flush \
+ clear.dependent_code \
+ clear.global_handles \
+ clear.maps \
+ clear.slots_buffer \
+ clear.store_buffer \
+ clear.string_table \
+ clear.weak_cells \
+ clear.weak_collections \
+ clear.weak_lists \
+ finish \
+ evacuate \
+ evacuate.candidates \
+ evacuate.clean_up \
+ evacuate.new_space \
+ evacuate.update_pointers \
+ evacuate.update_pointers.between_evacuated \
+ evacuate.update_pointers.to_evacuated \
+ evacuate.update_pointers.to_new \
+ evacuate.update_pointers.weak \
+ mark \
+ mark.finish_incremental \
+ mark.prepare_code_flush \
+ mark.roots \
+ mark.weak_closure \
+ sweep \
+ sweep.code \
+ sweep.map \
+ sweep.old \
+ incremental_finalize \
+"
+
+BASE_DIR=$(dirname $0)
+
+case $OP in
+ new-gen-rank)
+ cat $LOGFILE | grep "gc=s" \
+ | $BASE_DIR/eval_gc_nvp.py \
+ --no-histogram \
+ --rank $RANK_MODE \
+ ${INTERESTING_NEW_GEN_KEYS}
+ ;;
+ old-gen-rank)
+ cat $LOGFILE | grep "gc=ms" | grep "reduce_memory=0" | grep -v "steps=0" \
+ | $BASE_DIR/eval_gc_nvp.py \
+ --no-histogram \
+ --rank $RANK_MODE \
+ ${INTERESTING_OLD_GEN_KEYS}
+ ;;
+ *)
+ ;;
+esac
+
diff --git a/tools/external-reference-check.py b/tools/external-reference-check.py
index 386d4a9e..287eca42 100644
--- a/tools/external-reference-check.py
+++ b/tools/external-reference-check.py
@@ -8,7 +8,7 @@ import os
import sys
DECLARE_FILE = "src/assembler.h"
-REGISTER_FILE = "src/serialize.cc"
+REGISTER_FILE = "src/snapshot/serialize.cc"
DECLARE_RE = re.compile("\s*static ExternalReference ([^(]+)\(")
REGISTER_RE = re.compile("\s*Add\(ExternalReference::([^(]+)\(")
@@ -16,6 +16,7 @@ WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
# Ignore those.
BLACKLISTED = [
+ "fixed_typed_array_base_data_offset",
"page_flags",
"math_exp_constants",
"math_exp_log_table",
diff --git a/tools/fuzz-harness.sh b/tools/fuzz-harness.sh
index cef59868..31023de3 100755
--- a/tools/fuzz-harness.sh
+++ b/tools/fuzz-harness.sh
@@ -85,7 +85,9 @@ python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
"$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
exit_code=$(cat w* | grep " looking good" -c)
exit_code=$((100-exit_code))
-tar -cjf fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2 err-* w*
+archive=fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2
+echo "Creating archive $archive"
+tar -cjf $archive err-* w*
rm -f err-* w*
echo "Total failures: $exit_code"
diff --git a/tools/gc-nvp-to-csv.py b/tools/gc-nvp-to-csv.py
new file mode 100755
index 00000000..26ed8e1c
--- /dev/null
+++ b/tools/gc-nvp-to-csv.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#
+# This is a utility for generating csv files based on GC traces produced by
+# V8 when run with flags --trace-gc --trace-gc-nvp.
+#
+# Usage: gc-nvp-to-csv.py <GC-trace-filename>
+#
+
+import sys
+import gc_nvp_common
+
+def process_trace(filename):
+ trace = gc_nvp_common.parse_gc_trace(filename)
+ if len(trace):
+ keys = trace[0].keys()
+ print ', '.join(keys)
+ for entry in trace:
+ print ', '.join(map(lambda key: str(entry[key]), keys))
+
+
+if len(sys.argv) != 2:
+ print "Usage: %s <GC-trace-filename>" % sys.argv[0]
+ sys.exit(1)
+
+process_trace(sys.argv[1])
diff --git a/tools/gc-nvp-trace-processor.py b/tools/gc-nvp-trace-processor.py
index fe5a7f36..21526ae9 100755
--- a/tools/gc-nvp-trace-processor.py
+++ b/tools/gc-nvp-trace-processor.py
@@ -38,44 +38,14 @@
from __future__ import with_statement
-import sys, types, re, subprocess, math
+import sys, types, subprocess, math
+import gc_nvp_common
def flatten(l):
flat = []
for i in l: flat.extend(i)
return flat
-def split_nvp(s):
- t = {}
- for (name, value) in re.findall(r"(\w+)=([-\w]+)", s):
- try:
- t[name] = int(value)
- except ValueError:
- t[name] = value
-
- return t
-
-def parse_gc_trace(input):
- trace = []
- with open(input) as f:
- for line in f:
- info = split_nvp(line)
- if info and 'pause' in info and info['pause'] > 0:
- info['i'] = len(trace)
- trace.append(info)
- return trace
-
-def extract_field_names(script):
- fields = { 'data': true, 'in': true }
-
- for m in re.finditer(r"$(\w+)", script):
- field_name = m.group(1)
- if field_name not in fields:
- fields[field] = field_count
- field_count = field_count + 1
-
- return fields
-
def gnuplot(script):
gnuplot = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE)
gnuplot.stdin.write(script)
@@ -228,7 +198,7 @@ def scavenge_scope(r):
def real_mutator(r):
- return r['mutator'] - r['stepstook']
+ return r['mutator'] - r['steps_took']
plots = [
[
@@ -240,7 +210,7 @@ plots = [
Item('Sweep', 'sweep', lc = 'blue'),
Item('External', 'external', lc = '#489D43'),
Item('Other', other_scope, lc = 'grey'),
- Item('IGC Steps', 'stepstook', lc = '#FF6347'))
+ Item('IGC Steps', 'steps_took', lc = '#FF6347'))
],
[
Set('style fill solid 0.5 noborder'),
@@ -304,7 +274,7 @@ def count_nonzero(trace, field):
def process_trace(filename):
- trace = parse_gc_trace(filename)
+ trace = gc_nvp_common.parse_gc_trace(filename)
marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
scavenges = filter(lambda r: r['gc'] == 's', trace)
diff --git a/tools/gc_nvp_common.py b/tools/gc_nvp_common.py
new file mode 100644
index 00000000..3b517313
--- /dev/null
+++ b/tools/gc_nvp_common.py
@@ -0,0 +1,32 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#
+# Common code for parsing --trace-gc-nvp output.
+#
+
+
+from __future__ import with_statement
+import re
+
+def split_nvp(s):
+ t = {}
+ for (name, value) in re.findall(r"([._\w]+)=([-\w]+(?:\.[0-9]+)?)", s):
+ try:
+ t[name] = float(value)
+ except ValueError:
+ t[name] = value
+
+ return t
+
+
+def parse_gc_trace(input):
+ trace = []
+ with open(input) as f:
+ for line in f:
+ info = split_nvp(line)
+ if info and 'pause' in info and info['pause'] > 0:
+ info['i'] = len(trace)
+ trace.append(info)
+ return trace
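
Compared with the old copy in gc-nvp-trace-processor.py, the regex now accepts dotted keys such as evacuate.update_pointers and decimal values, and numeric values come back as floats rather than ints. A worked example (input line invented for illustration):

import re

def split_nvp(s):  # same definition as gc_nvp_common.py above
    t = {}
    for (name, value) in re.findall(r"([._\w]+)=([-\w]+(?:\.[0-9]+)?)", s):
        try:
            t[name] = float(value)
        except ValueError:
            t[name] = value
    return t

print(split_nvp("pause=12.5 gc=s evacuate.update_pointers=3.1 reduce_memory=0"))
# {'pause': 12.5, 'gc': 's', 'evacuate.update_pointers': 3.1,
#  'reduce_memory': 0.0}  -- key order may vary
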
diff --git a/tools/gcmole/gcmole.lua b/tools/gcmole/gcmole.lua
index d287f7b9..97396846 100644
--- a/tools/gcmole/gcmole.lua
+++ b/tools/gcmole/gcmole.lua
@@ -34,6 +34,9 @@ local FLAGS = {
-- Do not build gcsuspects file and reuse previously generated one.
reuse_gcsuspects = false;
+ -- Don't use parallel python runner.
+ sequential = false;
+
-- Print commands to console before executing them.
verbose = false;
@@ -90,7 +93,8 @@ if not CLANG_PLUGINS or CLANG_PLUGINS == "" then
CLANG_PLUGINS = DIR
end
-local function MakeClangCommandLine(plugin, plugin_args, triple, arch_define)
+local function MakeClangCommandLine(
+ plugin, plugin_args, triple, arch_define, arch_options)
if plugin_args then
for i = 1, #plugin_args do
plugin_args[i] = "-Xclang -plugin-arg-" .. plugin
@@ -109,21 +113,69 @@ local function MakeClangCommandLine(plugin, plugin_args, triple, arch_define)
.. " -I./"
.. " -Ithird_party/icu/source/common"
.. " -Ithird_party/icu/source/i18n"
+ .. " " .. arch_options
+end
+
+local function IterTable(t)
+ return coroutine.wrap(function ()
+ for i, v in ipairs(t) do
+ coroutine.yield(v)
+ end
+ end)
+end
+
+local function SplitResults(lines, func)
+ -- Splits the output of parallel.py and calls func on each result.
+ -- Bails out in case of an error in one of the executions.
+ local current = {}
+ local filename = ""
+ for line in lines do
+ local new_file = line:match "^______________ (.*)$"
+ local code = line:match "^______________ finish (%d+) ______________$"
+ if code then
+ if tonumber(code) > 0 then
+ log(table.concat(current, "\n"))
+ log("Failed to examine " .. filename)
+ return false
+ end
+ log("-- %s", filename)
+ func(filename, IterTable(current))
+ elseif new_file then
+ filename = new_file
+ current = {}
+ else
+ table.insert(current, line)
+ end
+ end
+ return true
end
function InvokeClangPluginForEachFile(filenames, cfg, func)
local cmd_line = MakeClangCommandLine(cfg.plugin,
cfg.plugin_args,
cfg.triple,
- cfg.arch_define)
- for _, filename in ipairs(filenames) do
- log("-- %s", filename)
- local action = cmd_line .. " " .. filename .. " 2>&1"
+ cfg.arch_define,
+ cfg.arch_options)
+ if FLAGS.sequential then
+ log("** Sequential execution.")
+ for _, filename in ipairs(filenames) do
+ log("-- %s", filename)
+ local action = cmd_line .. " " .. filename .. " 2>&1"
+ if FLAGS.verbose then print('popen ', action) end
+ local pipe = io.popen(action)
+ func(filename, pipe:lines())
+ local success = pipe:close()
+ if not success then error("Failed to run: " .. action) end
+ end
+ else
+ log("** Parallel execution.")
+ local action = "python tools/gcmole/parallel.py \""
+ .. cmd_line .. "\" " .. table.concat(filenames, " ")
if FLAGS.verbose then print('popen ', action) end
local pipe = io.popen(action)
- func(filename, pipe:lines())
- local success = pipe:close()
- if not success then error("Failed to run: " .. action) end
+ local success = SplitResults(pipe:lines(), func)
+ local closed = pipe:close()
+ if not (success and closed) then error("Failed to run: " .. action) end
end
end
@@ -201,13 +253,17 @@ end
local ARCHITECTURES = {
ia32 = config { triple = "i586-unknown-linux",
- arch_define = "V8_TARGET_ARCH_IA32" },
+ arch_define = "V8_TARGET_ARCH_IA32",
+ arch_options = "-m32" },
arm = config { triple = "i586-unknown-linux",
- arch_define = "V8_TARGET_ARCH_ARM" },
+ arch_define = "V8_TARGET_ARCH_ARM",
+ arch_options = "-m32" },
x64 = config { triple = "x86_64-unknown-linux",
- arch_define = "V8_TARGET_ARCH_X64" },
+ arch_define = "V8_TARGET_ARCH_X64",
+ arch_options = "" },
arm64 = config { triple = "x86_64-unknown-linux",
- arch_define = "V8_TARGET_ARCH_ARM64" },
+ arch_define = "V8_TARGET_ARCH_ARM64",
+ arch_options = "" },
}
-------------------------------------------------------------------------------
diff --git a/tools/gcmole/parallel.py b/tools/gcmole/parallel.py
new file mode 100755
index 00000000..0c045f42
--- /dev/null
+++ b/tools/gcmole/parallel.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This script calls the first argument for each of the following arguments in
+parallel. E.g.
+parallel.py "clang --opt" file1 file2
+calls
+clang --opt file1
+clang --opt file2
+
+The output (stdout and stderr) is concatenated sequentially in the form:
+______________ file1
+<output of clang --opt file1>
+______________ finish <exit code of clang --opt file1> ______________
+______________ file2
+<output of clang --opt file2>
+______________ finish <exit code of clang --opt file2> ______________
+"""
+
+import itertools
+import multiprocessing
+import subprocess
+import sys
+
+def invoke(cmdline):
+ try:
+ return (subprocess.check_output(
+ cmdline, shell=True, stderr=subprocess.STDOUT), 0)
+ except subprocess.CalledProcessError as e:
+ return (e.output, e.returncode)
+
+if __name__ == '__main__':
+ assert len(sys.argv) > 2
+ processes = multiprocessing.cpu_count()
+ pool = multiprocessing.Pool(processes=processes)
+ cmdlines = ["%s %s" % (sys.argv[1], filename) for filename in sys.argv[2:]]
+ for filename, result in itertools.izip(
+ sys.argv[2:], pool.imap(invoke, cmdlines)):
+ print "______________ %s" % filename
+ print result[0]
+ print "______________ finish %d ______________" % result[1]
diff --git a/tools/gdb-v8-support.py b/tools/gdb-v8-support.py
index 9cc046c7..5d26146f 100644
--- a/tools/gdb-v8-support.py
+++ b/tools/gdb-v8-support.py
@@ -25,6 +25,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import re
kSmiTag = 0
kSmiTagSize = 1
@@ -152,3 +153,34 @@ class V8PrintObject (gdb.Command):
v = v8_get_value(arg)
gdb.execute('call __gdb_print_v8_object(%d)' % v)
V8PrintObject()
+
+
+class FindAnywhere (gdb.Command):
+ """Search memory for the given pattern."""
+ MAPPING_RE = re.compile(r"^\s*\[\d+\]\s+0x([0-9A-Fa-f]+)->0x([0-9A-Fa-f]+)")
+ LIVE_MAPPING_RE = re.compile(r"^\s+0x([0-9A-Fa-f]+)\s+0x([0-9A-Fa-f]+)")
+ def __init__ (self):
+ super (FindAnywhere, self).__init__ ("find-anywhere", gdb.COMMAND_DATA)
+ def find (self, startAddr, endAddr, value):
+ try:
+ result = gdb.execute(
+ "find 0x%s, 0x%s, %s" % (startAddr, endAddr, value),
+ to_string = True)
+ if result.find("not found") == -1:
+ print result
+ except:
+ pass
+
+ def invoke (self, value, from_tty):
+ for l in gdb.execute("maint info sections", to_string = True).split('\n'):
+ m = FindAnywhere.MAPPING_RE.match(l)
+ if m is None:
+ continue
+ self.find(m.group(1), m.group(2), value)
+ for l in gdb.execute("info proc mappings", to_string = True).split('\n'):
+ m = FindAnywhere.LIVE_MAPPING_RE.match(l)
+ if m is None:
+ continue
+ self.find(m.group(1), m.group(2), value)
+
+FindAnywhere()
diff --git a/tools/gdbinit b/tools/gdbinit
index 8d0345af..5e6af9d6 100644
--- a/tools/gdbinit
+++ b/tools/gdbinit
@@ -20,6 +20,15 @@ Print a v8 Code object from an internal code address
Usage: jco pc
end
+# Print TypeFeedbackVector
+define jfv
+print ((v8::internal::TypeFeedbackVector*)($arg0))->Print()
+end
+document jfv
+Print a v8 TypeFeedbackVector object
+Usage: jfv tagged_ptr
+end
+
# Print DescriptorArray.
define jda
print ((v8::internal::DescriptorArray*)($arg0))->Print()
@@ -40,7 +49,7 @@ end
# Print JavaScript stack trace.
define jst
-print v8::internal::Isolate::Current()->PrintStack((FILE*) stdout)
+print v8::internal::Isolate::Current()->PrintStack((FILE*) stdout, 1)
end
document jst
Print the current JavaScript stack trace
diff --git a/tools/gen-postmortem-metadata.py b/tools/gen-postmortem-metadata.py
index 04a1ea87..516f8e74 100644
--- a/tools/gen-postmortem-metadata.py
+++ b/tools/gen-postmortem-metadata.py
@@ -70,8 +70,6 @@ consts_misc = [
{ 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
{ 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
- { 'name': 'FailureTag', 'value': 'kFailureTag' },
- { 'name': 'FailureTagMask', 'value': 'kFailureTagMask' },
{ 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' },
{ 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' },
{ 'name': 'SmiTag', 'value': 'kSmiTag' },
@@ -93,15 +91,37 @@ consts_misc = [
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
- 'value': 'FIELD' },
- { 'name': 'prop_type_first_phantom',
- 'value': 'TRANSITION' },
+ 'value': 'DATA' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',
'value': 'PropertyDetails::FieldIndexField::kMask' },
{ 'name': 'prop_index_shift',
'value': 'PropertyDetails::FieldIndexField::kShift' },
+ { 'name': 'prop_representation_mask',
+ 'value': 'PropertyDetails::RepresentationField::kMask' },
+ { 'name': 'prop_representation_shift',
+ 'value': 'PropertyDetails::RepresentationField::kShift' },
+ { 'name': 'prop_representation_integer8',
+ 'value': 'Representation::Kind::kInteger8' },
+ { 'name': 'prop_representation_uinteger8',
+ 'value': 'Representation::Kind::kUInteger8' },
+ { 'name': 'prop_representation_integer16',
+ 'value': 'Representation::Kind::kInteger16' },
+ { 'name': 'prop_representation_uinteger16',
+ 'value': 'Representation::Kind::kUInteger16' },
+ { 'name': 'prop_representation_smi',
+ 'value': 'Representation::Kind::kSmi' },
+ { 'name': 'prop_representation_integer32',
+ 'value': 'Representation::Kind::kInteger32' },
+ { 'name': 'prop_representation_double',
+ 'value': 'Representation::Kind::kDouble' },
+ { 'name': 'prop_representation_heapobject',
+ 'value': 'Representation::Kind::kHeapObject' },
+ { 'name': 'prop_representation_tagged',
+ 'value': 'Representation::Kind::kTagged' },
+ { 'name': 'prop_representation_external',
+ 'value': 'Representation::Kind::kExternal' },
{ 'name': 'prop_desc_key',
'value': 'DescriptorArray::kDescriptorKey' },
@@ -120,11 +140,15 @@ consts_misc = [
'value': 'DICTIONARY_ELEMENTS' },
{ 'name': 'bit_field2_elements_kind_mask',
- 'value': 'Map::kElementsKindMask' },
+ 'value': 'Map::ElementsKindBits::kMask' },
{ 'name': 'bit_field2_elements_kind_shift',
- 'value': 'Map::kElementsKindShift' },
+ 'value': 'Map::ElementsKindBits::kShift' },
{ 'name': 'bit_field3_dictionary_map_shift',
'value': 'Map::DictionaryMap::kShift' },
+ { 'name': 'bit_field3_number_of_own_descriptors_mask',
+ 'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
+ { 'name': 'bit_field3_number_of_own_descriptors_shift',
+ 'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
@@ -136,24 +160,54 @@ consts_misc = [
'value': 'JavaScriptFrameConstants::kFunctionOffset' },
{ 'name': 'off_fp_args',
'value': 'JavaScriptFrameConstants::kLastParameterOffset' },
+
+ { 'name': 'scopeinfo_idx_nparams',
+ 'value': 'ScopeInfo::kParameterCount' },
+ { 'name': 'scopeinfo_idx_nstacklocals',
+ 'value': 'ScopeInfo::kStackLocalCount' },
+ { 'name': 'scopeinfo_idx_ncontextlocals',
+ 'value': 'ScopeInfo::kContextLocalCount' },
+ { 'name': 'scopeinfo_idx_ncontextglobals',
+ 'value': 'ScopeInfo::kContextGlobalCount' },
+ { 'name': 'scopeinfo_idx_first_vars',
+ 'value': 'ScopeInfo::kVariablePartIndex' },
+
+ { 'name': 'sharedfunctioninfo_start_position_mask',
+ 'value': 'SharedFunctionInfo::kStartPositionMask' },
+ { 'name': 'sharedfunctioninfo_start_position_shift',
+ 'value': 'SharedFunctionInfo::kStartPositionShift' },
+
+ { 'name': 'jsarray_buffer_was_neutered_mask',
+ 'value': 'JSArrayBuffer::WasNeutered::kMask' },
+ { 'name': 'jsarray_buffer_was_neutered_shift',
+ 'value': 'JSArrayBuffer::WasNeutered::kShift' },
];
#
# The following useful fields are missing accessors, so we define fake ones.
#
extras_accessors = [
+ 'JSFunction, context, Context, kContextOffset',
+ 'Context, closure_index, int, CLOSURE_INDEX',
+ 'Context, native_context_index, int, NATIVE_CONTEXT_INDEX',
+ 'Context, previous_index, int, PREVIOUS_INDEX',
+ 'Context, min_context_slots, int, MIN_CONTEXT_SLOTS',
'HeapObject, map, Map, kMapOffset',
'JSObject, elements, Object, kElementsOffset',
'FixedArray, data, uintptr_t, kHeaderSize',
+ 'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
+ 'JSArrayBufferView, byte_offset, Object, kByteOffsetOffset',
+ 'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_attributes, int, kInstanceAttributesOffset',
- 'Map, inobject_properties, int, kInObjectPropertiesOffset',
+ 'Map, inobject_properties_or_constructor_function_index, int, kInObjectPropertiesOrConstructorFunctionIndexOffset',
'Map, instance_size, int, kInstanceSizeOffset',
'Map, bit_field, char, kBitFieldOffset',
'Map, bit_field2, char, kBitField2Offset',
- 'Map, bit_field3, SMI, kBitField3Offset',
+ 'Map, bit_field3, int, kBitField3Offset',
'Map, prototype, Object, kPrototypeOffset',
'NameDictionaryShape, prefix_size, int, kPrefixSize',
'NameDictionaryShape, entry_size, int, kEntrySize',
+ 'NameDictionary, prefix_start_index, int, kPrefixStartIndex',
'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize',
'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize',
'NumberDictionaryShape, entry_size, int, kEntrySize',
@@ -165,6 +219,7 @@ extras_accessors = [
'SeqOneByteString, chars, char, kHeaderSize',
'SeqTwoByteString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
+ 'SharedFunctionInfo, scope_info, ScopeInfo, kScopeInfoOffset',
'SlicedString, parent, String, kParentOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
@@ -196,9 +251,9 @@ header = '''
* This file is generated by %s. Do not edit directly.
*/
-#include "v8.h"
-#include "frames.h"
-#include "frames-inl.h" /* for architecture-specific frame constants */
+#include "src/v8.h"
+#include "src/frames.h"
+#include "src/frames-inl.h" /* for architecture-specific frame constants */
using namespace v8::internal;
@@ -219,6 +274,20 @@ footer = '''
'''
#
+# Get the base class
+#
+def get_base_class(klass):
+ if (klass == 'Object'):
+ return klass;
+
+ if (not (klass in klasses)):
+ return None;
+
+ k = klasses[klass];
+
+ return get_base_class(k['parent']);
+
+#
# Loads class hierarchy and type information from "objects.h".
#
def load_objects():
@@ -250,18 +319,20 @@ def load_objects():
in_insttype = False;
continue;
- line = re.sub('//.*', '', line.rstrip().lstrip());
+ line = re.sub('//.*', '', line.strip());
if (in_insttype):
typestr += line;
continue;
- match = re.match('class (\w[^\s:]*)(: public (\w[^\s{]*))?\s*{',
+ match = re.match('class (\w[^:]*)(: public (\w[^{]*))?\s*{\s*',
line);
if (match):
- klass = match.group(1);
+ klass = match.group(1).strip();
pklass = match.group(3);
+ if (pklass):
+ pklass = pklass.strip();
klasses[klass] = { 'parent': pklass };
#
@@ -512,6 +583,9 @@ def emit_config():
keys.sort();
for klassname in keys:
pklass = klasses[klassname]['parent'];
+ bklass = get_base_class(klassname);
+ if (bklass != 'Object'):
+ continue;
if (pklass == None):
continue;
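
The new get_base_class/emit_config logic restricts the emitted class list to classes whose ancestry actually terminates at Object, filtering out helper classes that objects.h declares but that are not heap objects. On a toy hierarchy (class names illustrative, not from objects.h):

klasses = {
    'HeapObject': {'parent': 'Object'},
    'String':     {'parent': 'HeapObject'},
    'Malloced':   {'parent': None},        # not rooted at Object
}

def get_base_class(klass):
    if klass == 'Object':
        return klass
    if klass not in klasses:
        return None
    return get_base_class(klasses[klass]['parent'])

print(get_base_class('String'))    # Object  -> kept by emit_config
print(get_base_class('Malloced'))  # None    -> skipped
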
diff --git a/tools/grokdump.py b/tools/grokdump.py
index 2177ec21..ab8f3265 100755
--- a/tools/grokdump.py
+++ b/tools/grokdump.py
@@ -135,10 +135,11 @@ def FullDump(reader, heap):
if is_ascii is not False:
# Output in the same format as the Unix hd command
addr = start
- for slot in xrange(location, location + size, 16):
+ for i in xrange(0, size, 16):
+ slot = i + location
hex_line = ""
asc_line = ""
- for i in xrange(0, 16):
+ for i in xrange(16):
if slot + i < location + size:
byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value
if byte >= 0x20 and byte < 0x7f:
@@ -158,9 +159,9 @@ def FullDump(reader, heap):
if is_executable is not True and is_ascii is not True:
print "%s - %s" % (reader.FormatIntPtr(start),
reader.FormatIntPtr(start + size))
- for slot in xrange(start,
- start + size,
- reader.PointerSize()):
+ print start + size + 1;
+ for i in xrange(0, size, reader.PointerSize()):
+ slot = start + i
maybe_address = reader.ReadUIntPtr(slot)
heap_object = heap.FindObject(maybe_address)
print "%s: %s" % (reader.FormatIntPtr(slot),
@@ -345,6 +346,59 @@ MINIDUMP_CONTEXT_ARM = Descriptor([
MD_CONTEXT_ARM_FLOATING_POINT))
])
+
+MD_CONTEXT_ARM64 = 0x80000000
+MD_CONTEXT_ARM64_INTEGER = (MD_CONTEXT_ARM64 | 0x00000002)
+MD_CONTEXT_ARM64_FLOATING_POINT = (MD_CONTEXT_ARM64 | 0x00000004)
+MD_FLOATINGSAVEAREA_ARM64_FPR_COUNT = 64
+
+MINIDUMP_FLOATING_SAVE_AREA_ARM = Descriptor([
+ ("fpscr", ctypes.c_uint64),
+ ("regs", ctypes.c_uint64 * MD_FLOATINGSAVEAREA_ARM64_FPR_COUNT),
+])
+
+MINIDUMP_CONTEXT_ARM64 = Descriptor([
+ ("context_flags", ctypes.c_uint64),
+ # MD_CONTEXT_ARM64_INTEGER.
+ ("r0", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r1", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r2", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r3", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r4", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r5", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r6", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r7", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r8", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r9", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r10", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r11", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r12", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r13", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r14", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r15", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r16", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r17", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r18", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r19", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r20", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r21", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r22", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r23", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r24", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r25", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r26", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r27", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("r28", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("fp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("lr", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("sp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("pc", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+ ("cpsr", ctypes.c_uint32),
+ ("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_ARM.ctype,
+ MD_CONTEXT_ARM64_FLOATING_POINT))
+])
+
+
MD_CONTEXT_AMD64 = 0x00100000
MD_CONTEXT_AMD64_CONTROL = (MD_CONTEXT_AMD64 | 0x00000001)
MD_CONTEXT_AMD64_INTEGER = (MD_CONTEXT_AMD64 | 0x00000002)
@@ -434,6 +488,12 @@ MINIDUMP_MEMORY_LIST = Descriptor([
("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
])
+MINIDUMP_MEMORY_LIST_Mac = Descriptor([
+ ("range_count", ctypes.c_uint32),
+ ("junk", ctypes.c_uint32),
+ ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
+])
+
MINIDUMP_MEMORY_LIST64 = Descriptor([
("range_count", ctypes.c_uint64),
("base_rva", ctypes.c_uint64),
@@ -455,6 +515,12 @@ MINIDUMP_THREAD_LIST = Descriptor([
("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
])
+MINIDUMP_THREAD_LIST_Mac = Descriptor([
+ ("thread_count", ctypes.c_uint32),
+ ("junk", ctypes.c_uint32),
+ ("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
+])
+
MINIDUMP_VS_FIXEDFILEINFO = Descriptor([
("dwSignature", ctypes.c_uint32),
("dwStrucVersion", ctypes.c_uint32),
@@ -489,12 +555,19 @@ MINIDUMP_MODULE_LIST = Descriptor([
("modules", lambda t: MINIDUMP_RAW_MODULE.ctype * t.number_of_modules)
])
+MINIDUMP_MODULE_LIST_Mac = Descriptor([
+ ("number_of_modules", ctypes.c_uint32),
+ ("junk", ctypes.c_uint32),
+ ("modules", lambda t: MINIDUMP_RAW_MODULE.ctype * t.number_of_modules)
+])
+
MINIDUMP_RAW_SYSTEM_INFO = Descriptor([
("processor_architecture", ctypes.c_uint16)
])
MD_CPU_ARCHITECTURE_X86 = 0
MD_CPU_ARCHITECTURE_ARM = 5
+MD_CPU_ARCHITECTURE_ARM64 = 0x8003
MD_CPU_ARCHITECTURE_AMD64 = 9
class FuncSymbol:
@@ -549,6 +622,7 @@ class MinidumpReader(object):
self.arch = system_info.processor_architecture
assert self.arch in [MD_CPU_ARCHITECTURE_AMD64,
MD_CPU_ARCHITECTURE_ARM,
+ MD_CPU_ARCHITECTURE_ARM64,
MD_CPU_ARCHITECTURE_X86]
assert not self.arch is None
@@ -567,9 +641,15 @@ class MinidumpReader(object):
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
self.exception_context = MINIDUMP_CONTEXT_ARM.Read(
self.minidump, self.exception.thread_context.rva)
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ self.exception_context = MINIDUMP_CONTEXT_ARM64.Read(
+ self.minidump, self.exception.thread_context.rva)
DebugPrint(self.exception_context)
elif d.stream_type == MD_THREAD_LIST_STREAM:
thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva)
+ if ctypes.sizeof(thread_list) + 4 == d.location.data_size:
+ thread_list = MINIDUMP_THREAD_LIST_Mac.Read(
+ self.minidump, d.location.rva)
assert ctypes.sizeof(thread_list) == d.location.data_size
DebugPrint(thread_list)
for thread in thread_list.threads:
@@ -579,12 +659,19 @@ class MinidumpReader(object):
assert self.module_list is None
self.module_list = MINIDUMP_MODULE_LIST.Read(
self.minidump, d.location.rva)
+ if ctypes.sizeof(self.module_list) + 4 == d.location.data_size:
+ self.module_list = MINIDUMP_MODULE_LIST_Mac.Read(
+ self.minidump, d.location.rva)
assert ctypes.sizeof(self.module_list) == d.location.data_size
+ DebugPrint(self.module_list)
elif d.stream_type == MD_MEMORY_LIST_STREAM:
print >>sys.stderr, "Warning: This is not a full minidump!"
assert self.memory_list is None
self.memory_list = MINIDUMP_MEMORY_LIST.Read(
self.minidump, d.location.rva)
+ if ctypes.sizeof(self.memory_list) + 4 == d.location.data_size:
+ self.memory_list = MINIDUMP_MEMORY_LIST_Mac.Read(
+ self.minidump, d.location.rva)
assert ctypes.sizeof(self.memory_list) == d.location.data_size
DebugPrint(self.memory_list)
elif d.stream_type == MD_MEMORY_64_LIST_STREAM:
@@ -614,6 +701,8 @@ class MinidumpReader(object):
return self.ReadU64(address)
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return self.ReadU32(address)
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ return self.ReadU64(address)
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.ReadU32(address)
@@ -626,13 +715,16 @@ class MinidumpReader(object):
return ctypes.c_uint64.from_buffer(self.minidump, location).value
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return ctypes.c_uint32.from_buffer(self.minidump, location).value
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ return ctypes.c_uint64.from_buffer(self.minidump, location).value
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return ctypes.c_uint32.from_buffer(self.minidump, location).value
def IsProbableASCIIRegion(self, location, length):
ascii_bytes = 0
non_ascii_bytes = 0
- for loc in xrange(location, location + length):
+ for i in xrange(length):
+ loc = location + i
byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
if byte >= 0x7f:
non_ascii_bytes += 1
@@ -653,7 +745,8 @@ class MinidumpReader(object):
def IsProbableExecutableRegion(self, location, length):
opcode_bytes = 0
sixty_four = self.arch == MD_CPU_ARCHITECTURE_AMD64
- for loc in xrange(location, location + length):
+ for i in xrange(length):
+ loc = location + i
byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
if (byte == 0x8b or # mov
byte == 0x89 or # mov reg-reg
@@ -701,7 +794,8 @@ class MinidumpReader(object):
def FindWord(self, word, alignment=0):
def search_inside_region(reader, start, size, location):
location = (location + alignment) & ~alignment
- for loc in xrange(location, location + size - self.PointerSize()):
+ for i in xrange(size - self.PointerSize()):
+ loc = location + i
if reader._ReadWord(loc) == word:
slot = start + (loc - location)
print "%s: %s" % (reader.FormatIntPtr(slot),
@@ -712,7 +806,8 @@ class MinidumpReader(object):
aligned_res = []
unaligned_res = []
def search_inside_region(reader, start, size, location):
- for loc in xrange(location, location + size - self.PointerSize()):
+ for i in xrange(size - self.PointerSize()):
+ loc = location + i
if reader._ReadWord(loc) == word:
slot = start + (loc - location)
if slot % self.PointerSize() == 0:
@@ -749,6 +844,9 @@ class MinidumpReader(object):
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
arch = "arm"
possible_objdump_flags = ["", "--disassembler-options=force-thumb"]
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ arch = "arm64"
+ possible_objdump_flags = ["", "--disassembler-options=force-thumb"]
elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
arch = "x64"
results = [ disasm.GetDisasmLines(self.minidump_name,
@@ -770,6 +868,8 @@ class MinidumpReader(object):
return self.exception_context.rip
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return self.exception_context.pc
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ return self.exception_context.pc
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.eip
@@ -778,6 +878,8 @@ class MinidumpReader(object):
return self.exception_context.rsp
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return self.exception_context.sp
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ return self.exception_context.sp
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.esp
@@ -786,6 +888,8 @@ class MinidumpReader(object):
return self.exception_context.rbp
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return None
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ return self.exception_context.fp
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.ebp
@@ -794,6 +898,8 @@ class MinidumpReader(object):
return "%016x" % value
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return "%08x" % value
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ return "%016x" % value
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return "%08x" % value
@@ -802,6 +908,8 @@ class MinidumpReader(object):
return 8
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return 4
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+ return 8
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return 4
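Most of the hunks above are mechanical MD_CPU_ARCHITECTURE_ARM64 plumbing: pointer reads, FormatIntPtr and PointerSize treat arm64 as 64-bit like AMD64, and the exception accessors pull pc, sp and fp from the arm64 context (unlike 32-bit ARM, which returns no frame pointer here). One oddity: the arm64 disassembly branch keeps the --disassembler-options=force-thumb objdump flag from the 32-bit ARM case, which looks like a copy-over, since AArch64 has no Thumb mode. The repeated if/elif chains could equally be table-driven; a hypothetical sketch using the constants from this file:

    # Hypothetical dispatch table equivalent to the repeated chains above.
    POINTER_SIZE = {
        MD_CPU_ARCHITECTURE_AMD64: 8,
        MD_CPU_ARCHITECTURE_ARM64: 8,
        MD_CPU_ARCHITECTURE_ARM:   4,
        MD_CPU_ARCHITECTURE_X86:   4,
    }

    def format_intptr(arch, value):
        width = POINTER_SIZE[arch] * 2   # hex digits per pointer
        return "%0*x" % (width, value)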
@@ -942,8 +1050,11 @@ class HeapObject(object):
p.Print(str(self))
def __str__(self):
+ instance_type = "???"
+ if self.map is not None:
+ instance_type = INSTANCE_TYPES[self.map.instance_type]
return "HeapObject(%s, %s)" % (self.heap.reader.FormatIntPtr(self.address),
- INSTANCE_TYPES[self.map.instance_type])
+ instance_type)
def ObjectField(self, offset):
field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
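The "???" fallback guards HeapObject.__str__ against truncated dumps where the object's map pointer cannot be resolved and self.map is None, which previously raised an AttributeError mid-print. A hypothetical helper expressing the same guard:

    def describe_instance_type(heap_object):
        # Mirrors the guard above; INSTANCE_TYPES is the lookup dict
        # defined elsewhere in grokdump.py.
        if heap_object.map is None:
            return "???"
        return INSTANCE_TYPES[heap_object.map.instance_type]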
@@ -1358,9 +1469,9 @@ class JSFunction(HeapObject):
def __str__(self):
inferred_name = ""
- if self.shared.Is(SharedFunctionInfo):
+ if self.shared is not None and self.shared.Is(SharedFunctionInfo):
inferred_name = self.shared.inferred_name
- return "JSFunction(%s, %s)" % \
+ return "JSFunction(%s, %s) " % \
(self.heap.reader.FormatIntPtr(self.address), inferred_name)
def _GetSource(self):
@@ -1562,6 +1673,8 @@ class V8Heap(object):
return (1 << 4) - 1
elif self.reader.arch == MD_CPU_ARCHITECTURE_ARM:
return (1 << 4) - 1
+ elif self.reader.arch == MD_CPU_ARCHITECTURE_ARM64:
+ return (1 << 4) - 1
elif self.reader.arch == MD_CPU_ARCHITECTURE_X86:
return (1 << 5) - 1
@@ -1590,7 +1703,7 @@ class KnownMap(HeapObject):
COMMENT_RE = re.compile(r"^C (0x[0-9a-fA-F]+) (.*)$")
PAGEADDRESS_RE = re.compile(
- r"^P (mappage|pointerpage|datapage) (0x[0-9a-fA-F]+)$")
+ r"^P (mappage|oldpage) (0x[0-9a-fA-F]+)$")
class InspectionInfo(object):
@@ -1667,8 +1780,7 @@ class InspectionPadawan(object):
self.reader = reader
self.heap = heap
self.known_first_map_page = 0
- self.known_first_data_page = 0
- self.known_first_pointer_page = 0
+ self.known_first_old_page = 0
def __getattr__(self, name):
"""An InspectionPadawan can be used instead of V8Heap, even though
@@ -1684,13 +1796,11 @@ class InspectionPadawan(object):
def IsInKnownOldSpace(self, tagged_address):
page_address = tagged_address & ~self.heap.PageAlignmentMask()
- return page_address in [self.known_first_data_page,
- self.known_first_pointer_page]
+ return page_address == self.known_first_old_page
def ContainingKnownOldSpaceName(self, tagged_address):
page_address = tagged_address & ~self.heap.PageAlignmentMask()
- if page_address == self.known_first_data_page: return "OLD_DATA_SPACE"
- if page_address == self.known_first_pointer_page: return "OLD_POINTER_SPACE"
+ if page_address == self.known_first_old_page: return "OLD_SPACE"
return None
def SenseObject(self, tagged_address):
@@ -1747,11 +1857,9 @@ class InspectionPadawan(object):
def PrintKnowledge(self):
print " known_first_map_page = %s\n"\
- " known_first_data_page = %s\n"\
- " known_first_pointer_page = %s" % (
+ " known_first_old_page = %s" % (
self.reader.FormatIntPtr(self.known_first_map_page),
- self.reader.FormatIntPtr(self.known_first_data_page),
- self.reader.FormatIntPtr(self.known_first_pointer_page))
+ self.reader.FormatIntPtr(self.known_first_old_page))
WEB_HEADER = """
<!DOCTYPE html>
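These renames track V8's heap reorganization: the separate OLD_DATA_SPACE and OLD_POINTER_SPACE were merged into a single OLD_SPACE, so the inspector keeps one known_first_old_page instead of a data page and a pointer page, the space-name lookup reports "OLD_SPACE", and the saved-page regex accepts "oldpage" where it used to accept "datapage" and "pointerpage". A side effect worth noting: page addresses saved by older versions of this tool under the old kinds no longer match PAGEADDRESS_RE and are presumably ignored on load.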
@@ -2066,7 +2174,7 @@ class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
self.send_error(404, 'Web parameter error: %s' % e.message)
-HTML_REG_FORMAT = "<span class=\"register\"><b>%s</b>:&nbsp;%s</span>\n"
+HTML_REG_FORMAT = "<span class=\"register\"><b>%s</b>:&nbsp;%s</span><br/>\n"
class InspectionWebFormatter(object):
@@ -2093,12 +2201,10 @@ class InspectionWebFormatter(object):
self.padawan = InspectionPadawan(self.reader, self.heap)
self.comments = InspectionInfo(minidump_name, self.reader)
- self.padawan.known_first_data_page = (
- self.comments.get_page_address("datapage"))
+ self.padawan.known_first_old_page = (
+ self.comments.get_page_address("oldpage"))
self.padawan.known_first_map_page = (
self.comments.get_page_address("mappage"))
- self.padawan.known_first_pointer_page = (
- self.comments.get_page_address("pointerpage"))
def set_comment(self, straddress, comment):
try:
@@ -2110,12 +2216,10 @@ class InspectionWebFormatter(object):
def set_page_address(self, kind, straddress):
try:
address = int(straddress, 0)
- if kind == "datapage":
- self.padawan.known_first_data_page = address
+ if kind == "oldpage":
+ self.padawan.known_first_old_page = address
elif kind == "mappage":
self.padawan.known_first_map_page = address
- elif kind == "pointerpage":
- self.padawan.known_first_pointer_page = address
self.comments.save_page_address(kind, address)
except ValueError:
print "Invalid address"
@@ -2235,7 +2339,7 @@ class InspectionWebFormatter(object):
f.write("<h3>Exception context</h3>")
f.write('<div class="code">\n')
f.write("Thread id: %d" % exception_thread.id)
- f.write("&nbsp;&nbsp; Exception code: %08X\n" %
+ f.write("&nbsp;&nbsp; Exception code: %08X<br/>\n" %
self.reader.exception.exception.code)
if details == InspectionWebFormatter.CONTEXT_FULL:
if self.reader.exception.exception.parameter_count > 0:
@@ -2248,7 +2352,7 @@ class InspectionWebFormatter(object):
f.write(HTML_REG_FORMAT %
(r, self.format_address(self.reader.Register(r))))
# TODO(vitalyr): decode eflags.
- if self.reader.arch == MD_CPU_ARCHITECTURE_ARM:
+ if self.reader.arch in [MD_CPU_ARCHITECTURE_ARM, MD_CPU_ARCHITECTURE_ARM64]:
f.write("<b>cpsr</b>: %s" % bin(self.reader.exception_context.cpsr)[2:])
else:
f.write("<b>eflags</b>: %s" %
@@ -2316,7 +2420,8 @@ class InspectionWebFormatter(object):
f.write('<div class="code">')
f.write("<table class=\"codedump\">\n")
- for slot in xrange(start_address, end_address, size):
+ for j in xrange(0, end_address - start_address, size):
+ slot = start_address + j
heap_object = ""
maybe_address = None
end_region = region[0] + region[1]
@@ -2353,7 +2458,7 @@ class InspectionWebFormatter(object):
f.write(address_fmt % self.format_address(slot))
f.write(" ")
self.td_from_address(f, maybe_address)
- f.write(":&nbsp; %s &nbsp;</td>\n" % straddress)
+ f.write(":&nbsp;%s&nbsp;</td>\n" % straddress)
f.write(" <td>")
if maybe_address != None:
self.output_comment_box(
@@ -2391,7 +2496,8 @@ class InspectionWebFormatter(object):
start = self.align_down(start_address, line_width)
- for address in xrange(start, end_address):
+ for i in xrange(end_address - start):
+ address = start + i
if address % 64 == 0:
if address != start:
f.write("<br>")
@@ -2460,7 +2566,7 @@ class InspectionWebFormatter(object):
(start_address, end_address, highlight_address, expand))
f.write('<div class="code">')
f.write("<table class=\"codedump\">\n");
- for i in xrange(0, len(lines)):
+ for i in xrange(len(lines)):
line = lines[i]
next_address = count
if i + 1 < len(lines):
@@ -2586,13 +2692,10 @@ class InspectionWebFormatter(object):
page_address = address & ~self.heap.PageAlignmentMask()
f.write("Page info: \n")
- self.output_page_info(f, "data", self.padawan.known_first_data_page, \
+ self.output_page_info(f, "old", self.padawan.known_first_old_page, \
page_address)
self.output_page_info(f, "map", self.padawan.known_first_map_page, \
page_address)
- self.output_page_info(f, "pointer", \
- self.padawan.known_first_pointer_page, \
- page_address)
if not self.reader.IsValidAddress(address):
f.write("<h3>The contents at address %s not found in the dump.</h3>" % \
@@ -2807,17 +2910,22 @@ class InspectionShell(cmd.Cmd):
else:
print "%s\n" % string
- def do_dd(self, address):
+ def do_dd(self, args):
"""
- Interpret memory at the given address (if available) as a sequence
- of words. Automatic alignment is not performed.
+ Interpret memory in the given region [address, address + num * word_size)
+ (if available) as a sequence of words. Automatic alignment is not performed.
+ If num is not specified, a default of 16 words is used.
+ Synopsis: dd 0x<address> 0x<num>
"""
- start = int(address, 16)
+ args = args.split(' ')
+ start = int(args[0], 16)
+ num = int(args[1], 16) if len(args) > 1 else 0x10
if (start & self.heap.ObjectAlignmentMask()) != 0:
print "Warning: Dumping un-aligned memory, is this what you had in mind?"
- for slot in xrange(start,
- start + self.reader.PointerSize() * 10,
- self.reader.PointerSize()):
+ for i in xrange(0,
+ self.reader.PointerSize() * num,
+ self.reader.PointerSize()):
+ slot = start + i
if not self.reader.IsValidAddress(slot):
print "Address is not contained within the minidump!"
return
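Usage with the new optional count (addresses hypothetical):

    dd 0x7fde9c184000 0x20    dumps 0x20 words from the given address
    dd 0x7fde9c184000         count omitted: defaults to 0x10 words

Since the count is parsed with int(..., 16), it is read as hexadecimal whether or not it carries a 0x prefix.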
@@ -2890,14 +2998,14 @@ class InspectionShell(cmd.Cmd):
"""
self.padawan.PrintKnowledge()
- def do_kd(self, address):
+ def do_ko(self, address):
"""
Teach V8 heap layout information to the inspector. Set the first
- data-space page by passing any pointer into that page.
+ old space page by passing any pointer into that page.
"""
address = int(address, 16)
page_address = address & ~self.heap.PageAlignmentMask()
- self.padawan.known_first_data_page = page_address
+ self.padawan.known_first_old_page = page_address
def do_km(self, address):
"""
@@ -2908,15 +3016,6 @@ class InspectionShell(cmd.Cmd):
page_address = address & ~self.heap.PageAlignmentMask()
self.padawan.known_first_map_page = page_address
- def do_kp(self, address):
- """
- Teach V8 heap layout information to the inspector. Set the first
- pointer-space page by passing any pointer into that page.
- """
- address = int(address, 16)
- page_address = address & ~self.heap.PageAlignmentMask()
- self.padawan.known_first_pointer_page = page_address
-
def do_list(self, smth):
"""
List all available memory regions.
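The kd command becomes ko and kp disappears, mirroring the old-space merge above: a single "ko 0x<address>" (any pointer into the first old-space page) replaces the former data-page/pointer-page pair, while km for the first map-space page is unchanged.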
@@ -2995,6 +3094,11 @@ CONTEXT_FOR_ARCH = {
MD_CPU_ARCHITECTURE_ARM:
['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
'r10', 'r11', 'r12', 'sp', 'lr', 'pc'],
+ MD_CPU_ARCHITECTURE_ARM64:
+ ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
+ 'r10', 'r11', 'r12', 'r13', 'r14', 'r15', 'r16', 'r17', 'r18', 'r19',
+ 'r20', 'r21', 'r22', 'r23', 'r24', 'r25', 'r26', 'r27', 'r28',
+ 'fp', 'lr', 'sp', 'pc'],
MD_CPU_ARCHITECTURE_X86:
['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
}
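The new arm64 entry enumerates 33 registers: the minidump context's r0..r28 (x0..x28 in A64 naming), then fp (x29), lr (x30), sp and pc; this is the list the register dump loops iterate over. A quick check of the count, as illustration:

    # 29 numbered registers plus fp/lr/sp/pc gives 33 entries.
    arm64_regs = ['r%d' % i for i in range(29)] + ['fp', 'lr', 'sp', 'pc']
    assert len(arm64_regs) == 33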
@@ -3042,7 +3146,7 @@ def AnalyzeMinidump(options, minidump_name):
for r in CONTEXT_FOR_ARCH[reader.arch]:
print " %s: %s" % (r, reader.FormatIntPtr(reader.Register(r)))
# TODO(vitalyr): decode eflags.
- if reader.arch == MD_CPU_ARCHITECTURE_ARM:
+ if reader.arch in [MD_CPU_ARCHITECTURE_ARM, MD_CPU_ARCHITECTURE_ARM64]:
print " cpsr: %s" % bin(reader.exception_context.cpsr)[2:]
else:
print " eflags: %s" % bin(reader.exception_context.eflags)[2:]
@@ -3080,6 +3184,10 @@ def AnalyzeMinidump(options, minidump_name):
lines = reader.GetDisasmLines(disasm_start, disasm_bytes)
+ if not lines:
+ print "Could not disassemble using %s." % OBJDUMP_BIN
+ print "Pass path to architecture specific objdump via --objdump?"
+
for line in lines:
print FormatDisasmLine(disasm_start, heap, line)
print
@@ -3101,20 +3209,38 @@ def AnalyzeMinidump(options, minidump_name):
elif not options.command:
if reader.exception is not None:
frame_pointer = reader.ExceptionFP()
+ in_oom_dump_area = False
print "Annotated stack (from exception.esp to bottom):"
for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
ascii_content = [c if c >= '\x20' and c < '\x7f' else '.'
for c in reader.ReadBytes(slot, reader.PointerSize())]
maybe_address = reader.ReadUIntPtr(slot)
+ maybe_address_contents = None
+ if maybe_address >= stack_top and maybe_address <= stack_bottom:
+ maybe_address_contents = reader.ReadUIntPtr(maybe_address)
+ if maybe_address_contents == 0xdecade00:
+ in_oom_dump_area = True
heap_object = heap.FindObject(maybe_address)
maybe_symbol = reader.FindSymbol(maybe_address)
+ oom_comment = ""
+ if in_oom_dump_area:
+ if maybe_address_contents == 0xdecade00:
+ oom_comment = " <----- HeapStats start marker"
+ elif maybe_address_contents == 0xdecade01:
+ oom_comment = " <----- HeapStats end marker"
+ elif maybe_address_contents is not None:
+ oom_comment = " %d (%d Mbytes)" % (maybe_address_contents,
+ maybe_address_contents >> 20)
if slot == frame_pointer:
maybe_symbol = "<---- frame pointer"
frame_pointer = maybe_address
- print "%s: %s %s %s" % (reader.FormatIntPtr(slot),
- reader.FormatIntPtr(maybe_address),
- "".join(ascii_content),
- maybe_symbol or "")
+ print "%s: %s %s %s%s" % (reader.FormatIntPtr(slot),
+ reader.FormatIntPtr(maybe_address),
+ "".join(ascii_content),
+ maybe_symbol or "",
+ oom_comment)
+ if maybe_address_contents == 0xdecade01:
+ in_oom_dump_area = False
if heap_object:
heap_object.Print(Printer())
print
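The new annotation pass detects V8's on-stack HeapStats block: when a stack slot points back into the stack and the pointed-to word is 0xdecade00 (V8's HeapStats start marker; 0xdecade01 marks the end), the loop switches into an "OOM dump area" mode and labels each subsequent slot with its decimal value and a megabyte conversion until the end marker is seen. A condensed Python 2 sketch of the per-slot state machine (contents is the pointee word, or None when the slot does not point into the stack):

    HEAP_STATS_START, HEAP_STATS_END = 0xdecade00, 0xdecade01

    def oom_comment_for(contents, in_area):
        # Returns (comment, new in_area flag) for one stack slot.
        if contents == HEAP_STATS_START:
            return " <----- HeapStats start marker", True
        if not in_area:
            return "", False
        if contents == HEAP_STATS_END:
            return " <----- HeapStats end marker", False
        if contents is None:
            return "", True
        return " %d (%d Mbytes)" % (contents, contents >> 20), True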
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 696434da..ca5fb090 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -30,49 +30,24 @@
'icu_use_data_file_flag%': 0,
'v8_code': 1,
'v8_random_seed%': 314159265,
+ 'v8_vector_stores%': 0,
+ 'embed_script%': "",
+ 'v8_extra_library_files%': [],
+ 'v8_experimental_extra_library_files%': [],
+ 'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
},
'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
'targets': [
{
'target_name': 'v8',
'dependencies_traverse': 1,
+ 'dependencies': ['v8_maybe_snapshot'],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
-
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_snapshot'],
- }],
- ['v8_use_snapshot!="true" and v8_use_external_startup_data==0', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_nosnapshot'],
- }],
- ['v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
- 'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'target_conditions': [
- ['_toolset=="host"', {
- 'inputs': [
- '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- ],
- }, {
- 'inputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
- }],
- ],
- }],
- ['v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
- 'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
- }],
['component=="shared_library"', {
'type': '<(component)',
'sources': [
@@ -125,6 +100,50 @@
},
},
{
+ # This rule delegates to either v8_snapshot, v8_nosnapshot, or
+ # v8_external_snapshot, depending on the current variables.
+ # The intention is to make the 'calling' rules a bit simpler.
+ 'target_name': 'v8_maybe_snapshot',
+ 'type': 'none',
+ 'conditions': [
+ ['v8_use_snapshot!="true"', {
+ # The dependency on v8_base should come from a transitive
+ # dependency however the Android toolchain requires libv8_base.a
+ # to appear before libv8_snapshot.a so it's listed explicitly.
+ 'dependencies': ['v8_base', 'v8_nosnapshot'],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
+ # The dependency on v8_base should come from a transitive
+ # dependency however the Android toolchain requires libv8_base.a
+ # to appear before libv8_snapshot.a so it's listed explicitly.
+ 'dependencies': ['v8_base', 'v8_snapshot'],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
+ 'dependencies': ['v8_base', 'v8_external_snapshot'],
+ 'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
+ 'dependencies': ['v8_base', 'v8_external_snapshot'],
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'inputs': [
+ '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ ],
+ }, {
+ 'inputs': [
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ }],
+ ],
+ }],
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ]
+ },
+ {
'target_name': 'v8_snapshot',
'type': 'static_library',
'conditions': [
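v8_maybe_snapshot is a 'none'-type indirection: the snapshot-mode conditions that previously sat on the 'v8' target move here near-verbatim (the nosnapshot case also drops its v8_use_external_startup_data==0 qualifier), and 'v8' now just depends on it, so other targets can reuse the v8_use_snapshot / v8_use_external_startup_data decision tree without repeating it. In outline:

    # Shape of the delegation (abridged from the hunk above):
    #   v8 ----------------> v8_maybe_snapshot
    #   v8_maybe_snapshot -> v8_base + one of
    #       v8_nosnapshot | v8_snapshot | v8_external_snapshot
    #   chosen by v8_use_snapshot and v8_use_external_startup_data.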
@@ -163,13 +182,16 @@
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
'<(INTERMEDIATE_DIR)/snapshot.cc',
],
'actions': [
{
'action_name': 'run_mksnapshot',
'inputs': [
- '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
+ '<(mksnapshot_exec)',
+ '<(embed_script)',
],
'outputs': [
'<(INTERMEDIATE_DIR)/snapshot.cc',
@@ -183,12 +205,16 @@
['v8_random_seed!=0', {
'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
}],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags': ['--vector-stores'],
+ }],
],
},
'action': [
- '<@(_inputs)',
+ '<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc'
+ '--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
+ '<(embed_script)',
],
},
],
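Three related changes to run_mksnapshot: the binary's path is factored into the mksnapshot_exec variable and invoked explicitly rather than via <@(_inputs) (which, with embed_script now among the inputs, would no longer expand to just the executable); the generated snapshot source is passed with an explicit --startup_src flag instead of positionally; and an optional embed_script is appended for mksnapshot to embed. The new v8_vector_stores% variable adds a --vector-stores flag the same way v8_random_seed adds --random-seed. The resulting command line looks roughly like this (paths hypothetical):

    out/Release/mksnapshot --random-seed 314159265 --vector-stores \
        --startup_src gen/snapshot.cc embed.js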
@@ -205,7 +231,9 @@
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- '../../src/snapshot-empty.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ '../../src/snapshot/snapshot-empty.cc',
],
'conditions': [
['want_separate_host_toolset==1', {
@@ -263,14 +291,14 @@
'../..',
],
'sources': [
- '../../src/natives-external.cc',
- '../../src/snapshot-external.cc',
+ '../../src/snapshot/natives-external.cc',
+ '../../src/snapshot/snapshot-external.cc',
],
'actions': [
{
'action_name': 'run_mksnapshot (external)',
'inputs': [
- '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
+ '<(mksnapshot_exec)',
],
'variables': {
'mksnapshot_flags': [
@@ -281,6 +309,9 @@
['v8_random_seed!=0', {
'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
}],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags': ['--vector-stores'],
+ }],
],
},
'conditions': [
@@ -288,38 +319,35 @@
'target_conditions': [
['_toolset=="host"', {
'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
'<(PRODUCT_DIR)/snapshot_blob_host.bin',
],
'action': [
- '<@(_inputs)',
+ '<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ '<(embed_script)',
],
}, {
'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'action': [
- '<@(_inputs)',
+ '<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+ '<(embed_script)',
],
}],
],
}, {
'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'action': [
- '<@(_inputs)',
+ '<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+ '<(embed_script)',
],
}],
],
@@ -339,32 +367,66 @@
},
'include_dirs+': [
'../..',
+ # To be able to find base/trace_event/common/trace_event_common.h
+ '../../..',
+ ],
+ 'defines': [
+ # TODO(jochen): Remove again after this is globally turned on.
+ 'V8_IMMINENT_DEPRECATION_WARNINGS',
],
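The rest of this hunk, together with the per-architecture hunks below, is a large mechanical re-sort of the v8_base source list: the public include/ headers are now listed explicitly, and implementation files move into the new ast/, crankshaft/, debug/, full-codegen/, parsing/, profiler/, regexp/ and snapshot/ subdirectories, alongside new interpreter/, tracing/ and wasm/ directories and a batch of new compiler/ files.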
'sources': [ ### gcmole(all) ###
+ '../../include/v8-debug.h',
+ '../../include/v8-experimental.h',
+ '../../include/v8-platform.h',
+ '../../include/v8-profiler.h',
+ '../../include/v8-testing.h',
+ '../../include/v8-util.h',
+ '../../include/v8-version.h',
+ '../../include/v8.h',
+ '../../include/v8config.h',
'../../src/accessors.cc',
'../../src/accessors.h',
+ '../../src/address-map.cc',
+ '../../src/address-map.h',
'../../src/allocation.cc',
'../../src/allocation.h',
'../../src/allocation-site-scopes.cc',
'../../src/allocation-site-scopes.h',
- '../../src/allocation-tracker.cc',
- '../../src/allocation-tracker.h',
+ '../../src/api-experimental.cc',
+ '../../src/api-experimental.h',
'../../src/api.cc',
'../../src/api.h',
+ '../../src/api-natives.cc',
+ '../../src/api-natives.h',
'../../src/arguments.cc',
'../../src/arguments.h',
'../../src/assembler.cc',
'../../src/assembler.h',
'../../src/assert-scope.h',
'../../src/assert-scope.cc',
- '../../src/ast-this-access-visitor.cc',
- '../../src/ast-this-access-visitor.h',
- '../../src/ast-value-factory.cc',
- '../../src/ast-value-factory.h',
- '../../src/ast-numbering.cc',
- '../../src/ast-numbering.h',
- '../../src/ast.cc',
- '../../src/ast.h',
+ '../../src/ast/ast-expression-rewriter.cc',
+ '../../src/ast/ast-expression-rewriter.h',
+ '../../src/ast/ast-expression-visitor.cc',
+ '../../src/ast/ast-expression-visitor.h',
+ '../../src/ast/ast-literal-reindexer.cc',
+ '../../src/ast/ast-literal-reindexer.h',
+ '../../src/ast/ast-numbering.cc',
+ '../../src/ast/ast-numbering.h',
+ '../../src/ast/ast-value-factory.cc',
+ '../../src/ast/ast-value-factory.h',
+ '../../src/ast/ast.cc',
+ '../../src/ast/ast.h',
+ '../../src/ast/modules.cc',
+ '../../src/ast/modules.h',
+ '../../src/ast/prettyprinter.cc',
+ '../../src/ast/prettyprinter.h',
+ '../../src/ast/scopeinfo.cc',
+ '../../src/ast/scopeinfo.h',
+ '../../src/ast/scopes.cc',
+ '../../src/ast/scopes.h',
+ '../../src/ast/variables.cc',
+ '../../src/ast/variables.h',
+ '../../src/atomic-utils.h',
'../../src/background-parsing-task.cc',
'../../src/background-parsing-task.h',
'../../src/bailout-reason.cc',
@@ -381,41 +443,55 @@
'../../src/bootstrapper.h',
'../../src/builtins.cc',
'../../src/builtins.h',
- '../../src/bytecodes-irregexp.h',
'../../src/cached-powers.cc',
'../../src/cached-powers.h',
+ '../../src/cancelable-task.cc',
+ '../../src/cancelable-task.h',
'../../src/char-predicates.cc',
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
- '../../src/checks.cc',
'../../src/checks.h',
- '../../src/circular-queue-inl.h',
- '../../src/circular-queue.h',
'../../src/code-factory.cc',
'../../src/code-factory.h',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
'../../src/code-stubs-hydrogen.cc',
- '../../src/code.h',
'../../src/codegen.cc',
'../../src/codegen.h',
'../../src/compilation-cache.cc',
'../../src/compilation-cache.h',
+ '../../src/compilation-dependencies.cc',
+ '../../src/compilation-dependencies.h',
'../../src/compilation-statistics.cc',
'../../src/compilation-statistics.h',
'../../src/compiler/access-builder.cc',
'../../src/compiler/access-builder.h',
+ '../../src/compiler/access-info.cc',
+ '../../src/compiler/access-info.h',
+ '../../src/compiler/all-nodes.cc',
+ '../../src/compiler/all-nodes.h',
'../../src/compiler/ast-graph-builder.cc',
'../../src/compiler/ast-graph-builder.h',
'../../src/compiler/ast-loop-assignment-analyzer.cc',
'../../src/compiler/ast-loop-assignment-analyzer.h',
'../../src/compiler/basic-block-instrumentor.cc',
'../../src/compiler/basic-block-instrumentor.h',
+ '../../src/compiler/branch-elimination.cc',
+ '../../src/compiler/branch-elimination.h',
+ '../../src/compiler/bytecode-branch-analysis.cc',
+ '../../src/compiler/bytecode-branch-analysis.h',
+ '../../src/compiler/bytecode-graph-builder.cc',
+ '../../src/compiler/bytecode-graph-builder.h',
'../../src/compiler/change-lowering.cc',
'../../src/compiler/change-lowering.h',
+ '../../src/compiler/c-linkage.cc',
+ '../../src/compiler/coalesced-live-ranges.cc',
+ '../../src/compiler/coalesced-live-ranges.h',
'../../src/compiler/code-generator-impl.h',
'../../src/compiler/code-generator.cc',
'../../src/compiler/code-generator.h',
+ '../../src/compiler/code-stub-assembler.cc',
+ '../../src/compiler/code-stub-assembler.h',
'../../src/compiler/common-node-cache.cc',
'../../src/compiler/common-node-cache.h',
'../../src/compiler/common-operator-reducer.cc',
@@ -424,70 +500,105 @@
'../../src/compiler/common-operator.h',
'../../src/compiler/control-builders.cc',
'../../src/compiler/control-builders.h',
+ '../../src/compiler/control-equivalence.cc',
'../../src/compiler/control-equivalence.h',
- '../../src/compiler/control-reducer.cc',
- '../../src/compiler/control-reducer.h',
+ '../../src/compiler/control-flow-optimizer.cc',
+ '../../src/compiler/control-flow-optimizer.h',
+ '../../src/compiler/dead-code-elimination.cc',
+ '../../src/compiler/dead-code-elimination.h',
'../../src/compiler/diamond.h',
+ '../../src/compiler/escape-analysis.cc',
+ '../../src/compiler/escape-analysis.h',
+ "../../src/compiler/escape-analysis-reducer.cc",
+ "../../src/compiler/escape-analysis-reducer.h",
+ '../../src/compiler/fast-accessor-assembler.cc',
+ '../../src/compiler/fast-accessor-assembler.h',
+ '../../src/compiler/frame.cc',
'../../src/compiler/frame.h',
+ '../../src/compiler/frame-elider.cc',
+ '../../src/compiler/frame-elider.h',
+ "../../src/compiler/frame-states.cc",
+ "../../src/compiler/frame-states.h",
'../../src/compiler/gap-resolver.cc',
'../../src/compiler/gap-resolver.h',
- '../../src/compiler/generic-algorithm.h',
- '../../src/compiler/graph-builder.cc',
- '../../src/compiler/graph-builder.h',
- '../../src/compiler/graph-inl.h',
'../../src/compiler/graph-reducer.cc',
'../../src/compiler/graph-reducer.h',
'../../src/compiler/graph-replay.cc',
'../../src/compiler/graph-replay.h',
+ '../../src/compiler/graph-trimmer.cc',
+ '../../src/compiler/graph-trimmer.h',
'../../src/compiler/graph-visualizer.cc',
'../../src/compiler/graph-visualizer.h',
'../../src/compiler/graph.cc',
'../../src/compiler/graph.h',
+ '../../src/compiler/greedy-allocator.cc',
+ '../../src/compiler/greedy-allocator.h',
'../../src/compiler/instruction-codes.h',
'../../src/compiler/instruction-selector-impl.h',
'../../src/compiler/instruction-selector.cc',
'../../src/compiler/instruction-selector.h',
+ '../../src/compiler/instruction-scheduler.cc',
+ '../../src/compiler/instruction-scheduler.h',
'../../src/compiler/instruction.cc',
'../../src/compiler/instruction.h',
+ '../../src/compiler/interpreter-assembler.cc',
+ '../../src/compiler/interpreter-assembler.h',
'../../src/compiler/js-builtin-reducer.cc',
'../../src/compiler/js-builtin-reducer.h',
+ '../../src/compiler/js-call-reducer.cc',
+ '../../src/compiler/js-call-reducer.h',
+ '../../src/compiler/js-context-relaxation.cc',
+ '../../src/compiler/js-context-relaxation.h',
'../../src/compiler/js-context-specialization.cc',
'../../src/compiler/js-context-specialization.h',
+ '../../src/compiler/js-frame-specialization.cc',
+ '../../src/compiler/js-frame-specialization.h',
'../../src/compiler/js-generic-lowering.cc',
'../../src/compiler/js-generic-lowering.h',
+ '../../src/compiler/js-global-object-specialization.cc',
+ '../../src/compiler/js-global-object-specialization.h',
'../../src/compiler/js-graph.cc',
'../../src/compiler/js-graph.h',
'../../src/compiler/js-inlining.cc',
'../../src/compiler/js-inlining.h',
- '../../src/compiler/js-intrinsic-builder.cc',
- '../../src/compiler/js-intrinsic-builder.h',
+ '../../src/compiler/js-inlining-heuristic.cc',
+ '../../src/compiler/js-inlining-heuristic.h',
+ '../../src/compiler/js-intrinsic-lowering.cc',
+ '../../src/compiler/js-intrinsic-lowering.h',
+ '../../src/compiler/js-native-context-specialization.cc',
+ '../../src/compiler/js-native-context-specialization.h',
'../../src/compiler/js-operator.cc',
'../../src/compiler/js-operator.h',
'../../src/compiler/js-typed-lowering.cc',
'../../src/compiler/js-typed-lowering.h',
'../../src/compiler/jump-threading.cc',
'../../src/compiler/jump-threading.h',
- '../../src/compiler/linkage-impl.h',
'../../src/compiler/linkage.cc',
'../../src/compiler/linkage.h',
+ '../../src/compiler/liveness-analyzer.cc',
+ '../../src/compiler/liveness-analyzer.h',
+ '../../src/compiler/live-range-separator.cc',
+ '../../src/compiler/live-range-separator.h',
'../../src/compiler/load-elimination.cc',
'../../src/compiler/load-elimination.h',
'../../src/compiler/loop-analysis.cc',
'../../src/compiler/loop-analysis.h',
+ '../../src/compiler/loop-peeling.cc',
+ '../../src/compiler/loop-peeling.h',
'../../src/compiler/machine-operator-reducer.cc',
'../../src/compiler/machine-operator-reducer.h',
'../../src/compiler/machine-operator.cc',
'../../src/compiler/machine-operator.h',
- '../../src/compiler/machine-type.cc',
- '../../src/compiler/machine-type.h',
'../../src/compiler/move-optimizer.cc',
'../../src/compiler/move-optimizer.h',
- '../../src/compiler/node-aux-data-inl.h',
'../../src/compiler/node-aux-data.h',
'../../src/compiler/node-cache.cc',
'../../src/compiler/node-cache.h',
+ '../../src/compiler/node-marker.cc',
+ '../../src/compiler/node-marker.h',
+ '../../src/compiler/node-matchers.cc',
'../../src/compiler/node-matchers.h',
- '../../src/compiler/node-properties-inl.h',
+ '../../src/compiler/node-properties.cc',
'../../src/compiler/node-properties.h',
'../../src/compiler/node.cc',
'../../src/compiler/node.h',
@@ -497,6 +608,8 @@
'../../src/compiler/operator-properties.h',
'../../src/compiler/operator.cc',
'../../src/compiler/operator.h',
+ '../../src/compiler/osr.cc',
+ '../../src/compiler/osr.h',
'../../src/compiler/pipeline.cc',
'../../src/compiler/pipeline.h',
'../../src/compiler/pipeline-statistics.cc',
@@ -507,8 +620,7 @@
'../../src/compiler/register-allocator.h',
'../../src/compiler/register-allocator-verifier.cc',
'../../src/compiler/register-allocator-verifier.h',
- '../../src/compiler/register-configuration.cc',
- '../../src/compiler/register-configuration.h',
+ '../../src/compiler/representation-change.cc',
'../../src/compiler/representation-change.h',
'../../src/compiler/schedule.cc',
'../../src/compiler/schedule.h',
@@ -524,16 +636,30 @@
'../../src/compiler/simplified-operator.h',
'../../src/compiler/source-position.cc',
'../../src/compiler/source-position.h',
+ '../../src/compiler/state-values-utils.cc',
+ '../../src/compiler/state-values-utils.h',
+ '../../src/compiler/tail-call-optimization.cc',
+ '../../src/compiler/tail-call-optimization.h',
+ '../../src/compiler/type-hint-analyzer.cc',
+ '../../src/compiler/type-hint-analyzer.h',
+ '../../src/compiler/type-hints.cc',
+ '../../src/compiler/type-hints.h',
'../../src/compiler/typer.cc',
'../../src/compiler/typer.h',
'../../src/compiler/value-numbering-reducer.cc',
'../../src/compiler/value-numbering-reducer.h',
'../../src/compiler/verifier.cc',
'../../src/compiler/verifier.h',
+ '../../src/compiler/wasm-compiler.cc',
+ '../../src/compiler/wasm-compiler.h',
+ '../../src/compiler/wasm-linkage.cc',
'../../src/compiler/zone-pool.cc',
'../../src/compiler/zone-pool.h',
'../../src/compiler.cc',
'../../src/compiler.h',
+ '../../src/context-measure.cc',
+ '../../src/context-measure.h',
+ '../../src/contexts-inl.h',
'../../src/contexts.cc',
'../../src/contexts.h',
'../../src/conversions-inl.h',
@@ -541,16 +667,84 @@
'../../src/conversions.h',
'../../src/counters.cc',
'../../src/counters.h',
- '../../src/cpu-profiler-inl.h',
- '../../src/cpu-profiler.cc',
- '../../src/cpu-profiler.h',
+ '../../src/crankshaft/hydrogen-alias-analysis.h',
+ '../../src/crankshaft/hydrogen-bce.cc',
+ '../../src/crankshaft/hydrogen-bce.h',
+ '../../src/crankshaft/hydrogen-bch.cc',
+ '../../src/crankshaft/hydrogen-bch.h',
+ '../../src/crankshaft/hydrogen-canonicalize.cc',
+ '../../src/crankshaft/hydrogen-canonicalize.h',
+ '../../src/crankshaft/hydrogen-check-elimination.cc',
+ '../../src/crankshaft/hydrogen-check-elimination.h',
+ '../../src/crankshaft/hydrogen-dce.cc',
+ '../../src/crankshaft/hydrogen-dce.h',
+ '../../src/crankshaft/hydrogen-dehoist.cc',
+ '../../src/crankshaft/hydrogen-dehoist.h',
+ '../../src/crankshaft/hydrogen-environment-liveness.cc',
+ '../../src/crankshaft/hydrogen-environment-liveness.h',
+ '../../src/crankshaft/hydrogen-escape-analysis.cc',
+ '../../src/crankshaft/hydrogen-escape-analysis.h',
+ '../../src/crankshaft/hydrogen-flow-engine.h',
+ '../../src/crankshaft/hydrogen-gvn.cc',
+ '../../src/crankshaft/hydrogen-gvn.h',
+ '../../src/crankshaft/hydrogen-infer-representation.cc',
+ '../../src/crankshaft/hydrogen-infer-representation.h',
+ '../../src/crankshaft/hydrogen-infer-types.cc',
+ '../../src/crankshaft/hydrogen-infer-types.h',
+ '../../src/crankshaft/hydrogen-instructions.cc',
+ '../../src/crankshaft/hydrogen-instructions.h',
+ '../../src/crankshaft/hydrogen-load-elimination.cc',
+ '../../src/crankshaft/hydrogen-load-elimination.h',
+ '../../src/crankshaft/hydrogen-mark-deoptimize.cc',
+ '../../src/crankshaft/hydrogen-mark-deoptimize.h',
+ '../../src/crankshaft/hydrogen-mark-unreachable.cc',
+ '../../src/crankshaft/hydrogen-mark-unreachable.h',
+ '../../src/crankshaft/hydrogen-osr.cc',
+ '../../src/crankshaft/hydrogen-osr.h',
+ '../../src/crankshaft/hydrogen-range-analysis.cc',
+ '../../src/crankshaft/hydrogen-range-analysis.h',
+ '../../src/crankshaft/hydrogen-redundant-phi.cc',
+ '../../src/crankshaft/hydrogen-redundant-phi.h',
+ '../../src/crankshaft/hydrogen-removable-simulates.cc',
+ '../../src/crankshaft/hydrogen-removable-simulates.h',
+ '../../src/crankshaft/hydrogen-representation-changes.cc',
+ '../../src/crankshaft/hydrogen-representation-changes.h',
+ '../../src/crankshaft/hydrogen-sce.cc',
+ '../../src/crankshaft/hydrogen-sce.h',
+ '../../src/crankshaft/hydrogen-store-elimination.cc',
+ '../../src/crankshaft/hydrogen-store-elimination.h',
+ '../../src/crankshaft/hydrogen-types.cc',
+ '../../src/crankshaft/hydrogen-types.h',
+ '../../src/crankshaft/hydrogen-uint32-analysis.cc',
+ '../../src/crankshaft/hydrogen-uint32-analysis.h',
+ '../../src/crankshaft/hydrogen.cc',
+ '../../src/crankshaft/hydrogen.h',
+ '../../src/crankshaft/lithium-allocator-inl.h',
+ '../../src/crankshaft/lithium-allocator.cc',
+ '../../src/crankshaft/lithium-allocator.h',
+ '../../src/crankshaft/lithium-codegen.cc',
+ '../../src/crankshaft/lithium-codegen.h',
+ '../../src/crankshaft/lithium.cc',
+ '../../src/crankshaft/lithium.h',
+ '../../src/crankshaft/lithium-inl.h',
+ '../../src/crankshaft/typing.cc',
+ '../../src/crankshaft/typing.h',
+ '../../src/crankshaft/unique.h',
'../../src/date.cc',
'../../src/date.h',
'../../src/dateparser-inl.h',
'../../src/dateparser.cc',
'../../src/dateparser.h',
- '../../src/debug.cc',
- '../../src/debug.h',
+ '../../src/debug/debug-evaluate.cc',
+ '../../src/debug/debug-evaluate.h',
+ '../../src/debug/debug-frames.cc',
+ '../../src/debug/debug-frames.h',
+ '../../src/debug/debug-scopes.cc',
+ '../../src/debug/debug-scopes.h',
+ '../../src/debug/debug.cc',
+ '../../src/debug/debug.h',
+ '../../src/debug/liveedit.cc',
+ '../../src/debug/liveedit.h',
'../../src/deoptimizer.cc',
'../../src/deoptimizer.h',
'../../src/disasm.h',
@@ -592,10 +786,10 @@
'../../src/frames-inl.h',
'../../src/frames.cc',
'../../src/frames.h',
- '../../src/full-codegen.cc',
- '../../src/full-codegen.h',
- '../../src/func-name-inferrer.cc',
- '../../src/func-name-inferrer.h',
+ '../../src/full-codegen/full-codegen.cc',
+ '../../src/full-codegen/full-codegen.h',
+ '../../src/futex-emulation.cc',
+ '../../src/futex-emulation.h',
'../../src/gdb-jit.cc',
'../../src/gdb-jit.h',
'../../src/global-handles.cc',
@@ -605,11 +799,10 @@
'../../src/handles.cc',
'../../src/handles.h',
'../../src/hashmap.h',
- '../../src/heap-profiler.cc',
- '../../src/heap-profiler.h',
- '../../src/heap-snapshot-generator-inl.h',
- '../../src/heap-snapshot-generator.cc',
- '../../src/heap-snapshot-generator.h',
+ '../../src/heap/array-buffer-tracker.cc',
+ '../../src/heap/array-buffer-tracker.h',
+ '../../src/heap/memory-reducer.cc',
+ '../../src/heap/memory-reducer.h',
'../../src/heap/gc-idle-time-handler.cc',
'../../src/heap/gc-idle-time-handler.h',
'../../src/heap/gc-tracer.cc',
@@ -618,72 +811,31 @@
'../../src/heap/heap.cc',
'../../src/heap/heap.h',
'../../src/heap/incremental-marking-inl.h',
+ '../../src/heap/incremental-marking-job.cc',
+ '../../src/heap/incremental-marking-job.h',
'../../src/heap/incremental-marking.cc',
'../../src/heap/incremental-marking.h',
'../../src/heap/mark-compact-inl.h',
'../../src/heap/mark-compact.cc',
'../../src/heap/mark-compact.h',
+ '../../src/heap/object-stats.cc',
+ '../../src/heap/object-stats.h',
'../../src/heap/objects-visiting-inl.h',
'../../src/heap/objects-visiting.cc',
'../../src/heap/objects-visiting.h',
+ '../../src/heap/scavenge-job.h',
+ '../../src/heap/scavenge-job.cc',
+ '../../src/heap/scavenger-inl.h',
+ '../../src/heap/scavenger.cc',
+ '../../src/heap/scavenger.h',
+ '../../src/heap/slots-buffer.cc',
+ '../../src/heap/slots-buffer.h',
'../../src/heap/spaces-inl.h',
'../../src/heap/spaces.cc',
'../../src/heap/spaces.h',
'../../src/heap/store-buffer-inl.h',
'../../src/heap/store-buffer.cc',
'../../src/heap/store-buffer.h',
- '../../src/hydrogen-alias-analysis.h',
- '../../src/hydrogen-bce.cc',
- '../../src/hydrogen-bce.h',
- '../../src/hydrogen-bch.cc',
- '../../src/hydrogen-bch.h',
- '../../src/hydrogen-canonicalize.cc',
- '../../src/hydrogen-canonicalize.h',
- '../../src/hydrogen-check-elimination.cc',
- '../../src/hydrogen-check-elimination.h',
- '../../src/hydrogen-dce.cc',
- '../../src/hydrogen-dce.h',
- '../../src/hydrogen-dehoist.cc',
- '../../src/hydrogen-dehoist.h',
- '../../src/hydrogen-environment-liveness.cc',
- '../../src/hydrogen-environment-liveness.h',
- '../../src/hydrogen-escape-analysis.cc',
- '../../src/hydrogen-escape-analysis.h',
- '../../src/hydrogen-flow-engine.h',
- '../../src/hydrogen-instructions.cc',
- '../../src/hydrogen-instructions.h',
- '../../src/hydrogen.cc',
- '../../src/hydrogen.h',
- '../../src/hydrogen-gvn.cc',
- '../../src/hydrogen-gvn.h',
- '../../src/hydrogen-infer-representation.cc',
- '../../src/hydrogen-infer-representation.h',
- '../../src/hydrogen-infer-types.cc',
- '../../src/hydrogen-infer-types.h',
- '../../src/hydrogen-load-elimination.cc',
- '../../src/hydrogen-load-elimination.h',
- '../../src/hydrogen-mark-deoptimize.cc',
- '../../src/hydrogen-mark-deoptimize.h',
- '../../src/hydrogen-mark-unreachable.cc',
- '../../src/hydrogen-mark-unreachable.h',
- '../../src/hydrogen-osr.cc',
- '../../src/hydrogen-osr.h',
- '../../src/hydrogen-range-analysis.cc',
- '../../src/hydrogen-range-analysis.h',
- '../../src/hydrogen-redundant-phi.cc',
- '../../src/hydrogen-redundant-phi.h',
- '../../src/hydrogen-removable-simulates.cc',
- '../../src/hydrogen-removable-simulates.h',
- '../../src/hydrogen-representation-changes.cc',
- '../../src/hydrogen-representation-changes.h',
- '../../src/hydrogen-sce.cc',
- '../../src/hydrogen-sce.h',
- '../../src/hydrogen-store-elimination.cc',
- '../../src/hydrogen-store-elimination.h',
- '../../src/hydrogen-types.cc',
- '../../src/hydrogen-types.h',
- '../../src/hydrogen-uint32-analysis.cc',
- '../../src/hydrogen-uint32-analysis.h',
'../../src/i18n.cc',
'../../src/i18n.h',
'../../src/icu_util.cc',
@@ -701,98 +853,153 @@
'../../src/ic/ic.h',
'../../src/ic/ic-compiler.cc',
'../../src/ic/ic-compiler.h',
- '../../src/interface.cc',
- '../../src/interface.h',
+ '../../src/identity-map.cc',
+ '../../src/identity-map.h',
'../../src/interface-descriptors.cc',
'../../src/interface-descriptors.h',
- '../../src/interpreter-irregexp.cc',
- '../../src/interpreter-irregexp.h',
+ '../../src/interpreter/bytecodes.cc',
+ '../../src/interpreter/bytecodes.h',
+ '../../src/interpreter/bytecode-array-builder.cc',
+ '../../src/interpreter/bytecode-array-builder.h',
+ '../../src/interpreter/bytecode-array-iterator.cc',
+ '../../src/interpreter/bytecode-array-iterator.h',
+ '../../src/interpreter/bytecode-register-allocator.cc',
+ '../../src/interpreter/bytecode-register-allocator.h',
+ '../../src/interpreter/bytecode-generator.cc',
+ '../../src/interpreter/bytecode-generator.h',
+ '../../src/interpreter/bytecode-traits.h',
+ '../../src/interpreter/constant-array-builder.cc',
+ '../../src/interpreter/constant-array-builder.h',
+ '../../src/interpreter/control-flow-builders.cc',
+ '../../src/interpreter/control-flow-builders.h',
+ '../../src/interpreter/interpreter.cc',
+ '../../src/interpreter/interpreter.h',
+ '../../src/isolate-inl.h',
'../../src/isolate.cc',
'../../src/isolate.h',
- '../../src/json-parser.h',
'../../src/json-stringifier.h',
- '../../src/jsregexp-inl.h',
- '../../src/jsregexp.cc',
- '../../src/jsregexp.h',
- '../../src/layout-descriptor-inl.h',
- '../../src/layout-descriptor.cc',
- '../../src/layout-descriptor.h',
+ '../../src/key-accumulator.h',
+ '../../src/key-accumulator.cc',
+ '../../src/layout-descriptor-inl.h',
+ '../../src/layout-descriptor.cc',
+ '../../src/layout-descriptor.h',
'../../src/list-inl.h',
'../../src/list.h',
- '../../src/lithium-allocator-inl.h',
- '../../src/lithium-allocator.cc',
- '../../src/lithium-allocator.h',
- '../../src/lithium-codegen.cc',
- '../../src/lithium-codegen.h',
- '../../src/lithium.cc',
- '../../src/lithium.h',
- '../../src/lithium-inl.h',
- '../../src/liveedit.cc',
- '../../src/liveedit.h',
+ '../../src/locked-queue-inl.h',
+ '../../src/locked-queue.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
'../../src/log.cc',
'../../src/log.h',
- '../../src/lookup-inl.h',
'../../src/lookup.cc',
'../../src/lookup.h',
'../../src/macro-assembler.h',
+ '../../src/machine-type.cc',
+ '../../src/machine-type.h',
'../../src/messages.cc',
'../../src/messages.h',
'../../src/msan.h',
- '../../src/natives.h',
+ '../../src/objects-body-descriptors-inl.h',
+ '../../src/objects-body-descriptors.h',
'../../src/objects-debug.cc',
'../../src/objects-inl.h',
'../../src/objects-printer.cc',
'../../src/objects.cc',
'../../src/objects.h',
- '../../src/optimizing-compiler-thread.cc',
- '../../src/optimizing-compiler-thread.h',
+ '../../src/optimizing-compile-dispatcher.cc',
+ '../../src/optimizing-compile-dispatcher.h',
'../../src/ostreams.cc',
'../../src/ostreams.h',
- '../../src/parser.cc',
- '../../src/parser.h',
- '../../src/perf-jit.cc',
- '../../src/perf-jit.h',
- '../../src/preparse-data-format.h',
- '../../src/preparse-data.cc',
- '../../src/preparse-data.h',
- '../../src/preparser.cc',
- '../../src/preparser.h',
- '../../src/prettyprinter.cc',
- '../../src/prettyprinter.h',
- '../../src/profile-generator-inl.h',
- '../../src/profile-generator.cc',
- '../../src/profile-generator.h',
+ '../../src/parsing/expression-classifier.h',
+ '../../src/parsing/func-name-inferrer.cc',
+ '../../src/parsing/func-name-inferrer.h',
+ '../../src/parsing/json-parser.h',
+ '../../src/parsing/parameter-initializer-rewriter.cc',
+ '../../src/parsing/parameter-initializer-rewriter.h',
+ '../../src/parsing/parser-base.h',
+ '../../src/parsing/parser.cc',
+ '../../src/parsing/parser.h',
+ '../../src/parsing/pattern-rewriter.cc',
+ '../../src/parsing/preparse-data-format.h',
+ '../../src/parsing/preparse-data.cc',
+ '../../src/parsing/preparse-data.h',
+ '../../src/parsing/preparser.cc',
+ '../../src/parsing/preparser.h',
+ '../../src/parsing/rewriter.cc',
+ '../../src/parsing/rewriter.h',
+ '../../src/parsing/scanner-character-streams.cc',
+ '../../src/parsing/scanner-character-streams.h',
+ '../../src/parsing/scanner.cc',
+ '../../src/parsing/scanner.h',
+ '../../src/parsing/token.cc',
+ '../../src/parsing/token.h',
+ '../../src/pending-compilation-error-handler.cc',
+ '../../src/pending-compilation-error-handler.h',
+ '../../src/profiler/allocation-tracker.cc',
+ '../../src/profiler/allocation-tracker.h',
+ '../../src/profiler/circular-queue-inl.h',
+ '../../src/profiler/circular-queue.h',
+ '../../src/profiler/cpu-profiler-inl.h',
+ '../../src/profiler/cpu-profiler.cc',
+ '../../src/profiler/cpu-profiler.h',
+ '../../src/profiler/heap-profiler.cc',
+ '../../src/profiler/heap-profiler.h',
+ '../../src/profiler/heap-snapshot-generator-inl.h',
+ '../../src/profiler/heap-snapshot-generator.cc',
+ '../../src/profiler/heap-snapshot-generator.h',
+ '../../src/profiler/profile-generator-inl.h',
+ '../../src/profiler/profile-generator.cc',
+ '../../src/profiler/profile-generator.h',
+ '../../src/profiler/sampler.cc',
+ '../../src/profiler/sampler.h',
+ '../../src/profiler/strings-storage.cc',
+ '../../src/profiler/strings-storage.h',
+ '../../src/profiler/unbound-queue-inl.h',
+ '../../src/profiler/unbound-queue.h',
+ '../../src/property-descriptor.cc',
+ '../../src/property-descriptor.h',
'../../src/property-details.h',
'../../src/property.cc',
'../../src/property.h',
'../../src/prototype.h',
- '../../src/regexp-macro-assembler-irregexp-inl.h',
- '../../src/regexp-macro-assembler-irregexp.cc',
- '../../src/regexp-macro-assembler-irregexp.h',
- '../../src/regexp-macro-assembler-tracer.cc',
- '../../src/regexp-macro-assembler-tracer.h',
- '../../src/regexp-macro-assembler.cc',
- '../../src/regexp-macro-assembler.h',
- '../../src/regexp-stack.cc',
- '../../src/regexp-stack.h',
- '../../src/rewriter.cc',
- '../../src/rewriter.h',
+ '../../src/regexp/bytecodes-irregexp.h',
+ '../../src/regexp/interpreter-irregexp.cc',
+ '../../src/regexp/interpreter-irregexp.h',
+ '../../src/regexp/jsregexp-inl.h',
+ '../../src/regexp/jsregexp.cc',
+ '../../src/regexp/jsregexp.h',
+ '../../src/regexp/regexp-ast.cc',
+ '../../src/regexp/regexp-ast.h',
+ '../../src/regexp/regexp-macro-assembler-irregexp-inl.h',
+ '../../src/regexp/regexp-macro-assembler-irregexp.cc',
+ '../../src/regexp/regexp-macro-assembler-irregexp.h',
+ '../../src/regexp/regexp-macro-assembler-tracer.cc',
+ '../../src/regexp/regexp-macro-assembler-tracer.h',
+ '../../src/regexp/regexp-macro-assembler.cc',
+ '../../src/regexp/regexp-macro-assembler.h',
+ '../../src/regexp/regexp-parser.cc',
+ '../../src/regexp/regexp-parser.h',
+ '../../src/regexp/regexp-stack.cc',
+ '../../src/regexp/regexp-stack.h',
+ '../../src/register-configuration.cc',
+ '../../src/register-configuration.h',
'../../src/runtime-profiler.cc',
'../../src/runtime-profiler.h',
- '../../src/runtime/runtime-api.cc',
'../../src/runtime/runtime-array.cc',
+ '../../src/runtime/runtime-atomics.cc',
'../../src/runtime/runtime-classes.cc',
'../../src/runtime/runtime-collections.cc',
'../../src/runtime/runtime-compiler.cc',
'../../src/runtime/runtime-date.cc',
'../../src/runtime/runtime-debug.cc',
+ '../../src/runtime/runtime-forin.cc',
'../../src/runtime/runtime-function.cc',
+ '../../src/runtime/runtime-futex.cc',
'../../src/runtime/runtime-generator.cc',
'../../src/runtime/runtime-i18n.cc',
'../../src/runtime/runtime-internal.cc',
+ '../../src/runtime/runtime-interpreter.cc',
'../../src/runtime/runtime-json.cc',
'../../src/runtime/runtime-literals.cc',
'../../src/runtime/runtime-liveedit.cc',
@@ -800,9 +1007,11 @@
'../../src/runtime/runtime-numbers.cc',
'../../src/runtime/runtime-object.cc',
'../../src/runtime/runtime-observe.cc',
+ '../../src/runtime/runtime-operators.cc',
'../../src/runtime/runtime-proxy.cc',
'../../src/runtime/runtime-regexp.cc',
'../../src/runtime/runtime-scopes.cc',
+ '../../src/runtime/runtime-simd.cc',
'../../src/runtime/runtime-strings.cc',
'../../src/runtime/runtime-symbol.cc',
'../../src/runtime/runtime-test.cc',
@@ -813,27 +1022,23 @@
'../../src/runtime/runtime.h',
'../../src/safepoint-table.cc',
'../../src/safepoint-table.h',
- '../../src/sampler.cc',
- '../../src/sampler.h',
- '../../src/scanner-character-streams.cc',
- '../../src/scanner-character-streams.h',
- '../../src/scanner.cc',
- '../../src/scanner.h',
- '../../src/scopeinfo.cc',
- '../../src/scopeinfo.h',
- '../../src/scopes.cc',
- '../../src/scopes.h',
- '../../src/serialize.cc',
- '../../src/serialize.h',
+ '../../src/signature.h',
+ '../../src/simulator.h',
'../../src/small-pointer-list.h',
- '../../src/smart-pointers.h',
- '../../src/snapshot.h',
- '../../src/snapshot-common.cc',
- '../../src/snapshot-source-sink.cc',
- '../../src/snapshot-source-sink.h',
+ '../../src/snapshot/natives.h',
+ '../../src/snapshot/natives-common.cc',
+ '../../src/snapshot/serialize.cc',
+ '../../src/snapshot/serialize.h',
+ '../../src/snapshot/snapshot.h',
+ '../../src/snapshot/snapshot-common.cc',
+ '../../src/snapshot/snapshot-source-sink.cc',
+ '../../src/snapshot/snapshot-source-sink.h',
+ '../../src/splay-tree.h',
+ '../../src/splay-tree-inl.h',
+ '../../src/startup-data-util.cc',
+ '../../src/startup-data-util.h',
'../../src/string-builder.cc',
'../../src/string-builder.h',
- '../../src/string-search.cc',
'../../src/string-search.h',
'../../src/string-stream.cc',
'../../src/string-stream.h',
@@ -841,11 +1046,13 @@
'../../src/strtod.h',
'../../src/ic/stub-cache.cc',
'../../src/ic/stub-cache.h',
- '../../src/token.cc',
- '../../src/token.h',
+ '../../src/tracing/trace-event.cc',
+ '../../src/tracing/trace-event.h',
'../../src/transitions-inl.h',
'../../src/transitions.cc',
'../../src/transitions.h',
+ '../../src/type-cache.cc',
+ '../../src/type-cache.h',
'../../src/type-feedback-vector-inl.h',
'../../src/type-feedback-vector.cc',
'../../src/type-feedback-vector.h',
@@ -854,17 +1061,17 @@
'../../src/types-inl.h',
'../../src/types.cc',
'../../src/types.h',
- '../../src/typing.cc',
- '../../src/typing.h',
- '../../src/unbound-queue-inl.h',
- '../../src/unbound-queue.h',
+ '../../src/typing-asm.cc',
+ '../../src/typing-asm.h',
+ '../../src/typing-reset.cc',
+ '../../src/typing-reset.h',
'../../src/unicode-inl.h',
'../../src/unicode.cc',
'../../src/unicode.h',
+ '../../src/unicode-cache-inl.h',
+ '../../src/unicode-cache.h',
'../../src/unicode-decoder.cc',
'../../src/unicode-decoder.h',
- '../../src/unique.h',
- '../../src/utils-inl.h',
'../../src/utils.cc',
'../../src/utils.h',
'../../src/v8.cc',
@@ -872,16 +1079,33 @@
'../../src/v8memory.h',
'../../src/v8threads.cc',
'../../src/v8threads.h',
- '../../src/variables.cc',
- '../../src/variables.h',
'../../src/vector.h',
'../../src/version.cc',
'../../src/version.h',
'../../src/vm-state-inl.h',
'../../src/vm-state.h',
- '../../src/zone-inl.h',
+ '../../src/wasm/asm-wasm-builder.cc',
+ '../../src/wasm/asm-wasm-builder.h',
+ '../../src/wasm/ast-decoder.cc',
+ '../../src/wasm/ast-decoder.h',
+ '../../src/wasm/decoder.h',
+ '../../src/wasm/encoder.cc',
+ '../../src/wasm/encoder.h',
+ '../../src/wasm/module-decoder.cc',
+ '../../src/wasm/module-decoder.h',
+ '../../src/wasm/wasm-js.cc',
+ '../../src/wasm/wasm-js.h',
+ '../../src/wasm/wasm-macro-gen.h',
+ '../../src/wasm/wasm-module.cc',
+ '../../src/wasm/wasm-module.h',
+ '../../src/wasm/wasm-opcodes.cc',
+ '../../src/wasm/wasm-opcodes.h',
+ '../../src/wasm/wasm-result.cc',
+ '../../src/wasm/wasm-result.h',
'../../src/zone.cc',
'../../src/zone.h',
+ '../../src/zone-allocator.h',
+ '../../src/zone-containers.h',
'../../src/third_party/fdlibm/fdlibm.cc',
'../../src/third_party/fdlibm/fdlibm.h',
],
@@ -904,34 +1128,35 @@
'../../src/arm/constants-arm.h',
'../../src/arm/constants-arm.cc',
'../../src/arm/cpu-arm.cc',
- '../../src/arm/debug-arm.cc',
'../../src/arm/deoptimizer-arm.cc',
'../../src/arm/disasm-arm.cc',
'../../src/arm/frames-arm.cc',
'../../src/arm/frames-arm.h',
- '../../src/arm/full-codegen-arm.cc',
'../../src/arm/interface-descriptors-arm.cc',
'../../src/arm/interface-descriptors-arm.h',
- '../../src/arm/lithium-arm.cc',
- '../../src/arm/lithium-arm.h',
- '../../src/arm/lithium-codegen-arm.cc',
- '../../src/arm/lithium-codegen-arm.h',
- '../../src/arm/lithium-gap-resolver-arm.cc',
- '../../src/arm/lithium-gap-resolver-arm.h',
'../../src/arm/macro-assembler-arm.cc',
'../../src/arm/macro-assembler-arm.h',
- '../../src/arm/regexp-macro-assembler-arm.cc',
- '../../src/arm/regexp-macro-assembler-arm.h',
'../../src/arm/simulator-arm.cc',
+ '../../src/arm/simulator-arm.h',
'../../src/compiler/arm/code-generator-arm.cc',
'../../src/compiler/arm/instruction-codes-arm.h',
+ '../../src/compiler/arm/instruction-scheduler-arm.cc',
'../../src/compiler/arm/instruction-selector-arm.cc',
- '../../src/compiler/arm/linkage-arm.cc',
+ '../../src/crankshaft/arm/lithium-arm.cc',
+ '../../src/crankshaft/arm/lithium-arm.h',
+ '../../src/crankshaft/arm/lithium-codegen-arm.cc',
+ '../../src/crankshaft/arm/lithium-codegen-arm.h',
+ '../../src/crankshaft/arm/lithium-gap-resolver-arm.cc',
+ '../../src/crankshaft/arm/lithium-gap-resolver-arm.h',
+ '../../src/debug/arm/debug-arm.cc',
+ '../../src/full-codegen/arm/full-codegen-arm.cc',
'../../src/ic/arm/access-compiler-arm.cc',
'../../src/ic/arm/handler-compiler-arm.cc',
'../../src/ic/arm/ic-arm.cc',
'../../src/ic/arm/ic-compiler-arm.cc',
'../../src/ic/arm/stub-cache-arm.cc',
+ '../../src/regexp/arm/regexp-macro-assembler-arm.cc',
+ '../../src/regexp/arm/regexp-macro-assembler-arm.h',
],
}],
['v8_target_arch=="arm64"', {
@@ -946,49 +1171,49 @@
'../../src/arm64/code-stubs-arm64.h',
'../../src/arm64/constants-arm64.h',
'../../src/arm64/cpu-arm64.cc',
- '../../src/arm64/debug-arm64.cc',
'../../src/arm64/decoder-arm64.cc',
'../../src/arm64/decoder-arm64.h',
'../../src/arm64/decoder-arm64-inl.h',
- '../../src/arm64/delayed-masm-arm64.cc',
- '../../src/arm64/delayed-masm-arm64.h',
- '../../src/arm64/delayed-masm-arm64-inl.h',
'../../src/arm64/deoptimizer-arm64.cc',
'../../src/arm64/disasm-arm64.cc',
'../../src/arm64/disasm-arm64.h',
'../../src/arm64/frames-arm64.cc',
'../../src/arm64/frames-arm64.h',
- '../../src/arm64/full-codegen-arm64.cc',
'../../src/arm64/instructions-arm64.cc',
'../../src/arm64/instructions-arm64.h',
'../../src/arm64/instrument-arm64.cc',
'../../src/arm64/instrument-arm64.h',
'../../src/arm64/interface-descriptors-arm64.cc',
'../../src/arm64/interface-descriptors-arm64.h',
- '../../src/arm64/lithium-arm64.cc',
- '../../src/arm64/lithium-arm64.h',
- '../../src/arm64/lithium-codegen-arm64.cc',
- '../../src/arm64/lithium-codegen-arm64.h',
- '../../src/arm64/lithium-gap-resolver-arm64.cc',
- '../../src/arm64/lithium-gap-resolver-arm64.h',
'../../src/arm64/macro-assembler-arm64.cc',
'../../src/arm64/macro-assembler-arm64.h',
'../../src/arm64/macro-assembler-arm64-inl.h',
- '../../src/arm64/regexp-macro-assembler-arm64.cc',
- '../../src/arm64/regexp-macro-assembler-arm64.h',
'../../src/arm64/simulator-arm64.cc',
'../../src/arm64/simulator-arm64.h',
'../../src/arm64/utils-arm64.cc',
'../../src/arm64/utils-arm64.h',
'../../src/compiler/arm64/code-generator-arm64.cc',
'../../src/compiler/arm64/instruction-codes-arm64.h',
+ '../../src/compiler/arm64/instruction-scheduler-arm64.cc',
'../../src/compiler/arm64/instruction-selector-arm64.cc',
- '../../src/compiler/arm64/linkage-arm64.cc',
+ '../../src/crankshaft/arm64/delayed-masm-arm64.cc',
+ '../../src/crankshaft/arm64/delayed-masm-arm64.h',
+ '../../src/crankshaft/arm64/delayed-masm-arm64-inl.h',
+ '../../src/crankshaft/arm64/lithium-arm64.cc',
+ '../../src/crankshaft/arm64/lithium-arm64.h',
+ '../../src/crankshaft/arm64/lithium-codegen-arm64.cc',
+ '../../src/crankshaft/arm64/lithium-codegen-arm64.h',
+ '../../src/crankshaft/arm64/lithium-gap-resolver-arm64.cc',
+ '../../src/crankshaft/arm64/lithium-gap-resolver-arm64.h',
+ '../../src/debug/arm64/debug-arm64.cc',
+ '../../src/full-codegen/arm64/full-codegen-arm64.cc',
'../../src/ic/arm64/access-compiler-arm64.cc',
'../../src/ic/arm64/handler-compiler-arm64.cc',
'../../src/ic/arm64/ic-arm64.cc',
'../../src/ic/arm64/ic-compiler-arm64.cc',
'../../src/ic/arm64/stub-cache-arm64.cc',
+ '../../src/regexp/arm64/regexp-macro-assembler-arm64.cc',
+ '../../src/regexp/arm64/regexp-macro-assembler-arm64.h',
],
}],
['v8_target_arch=="ia32"', {
@@ -1002,32 +1227,32 @@
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
- '../../src/ia32/debug-ia32.cc',
'../../src/ia32/deoptimizer-ia32.cc',
'../../src/ia32/disasm-ia32.cc',
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
- '../../src/ia32/full-codegen-ia32.cc',
'../../src/ia32/interface-descriptors-ia32.cc',
- '../../src/ia32/lithium-codegen-ia32.cc',
- '../../src/ia32/lithium-codegen-ia32.h',
- '../../src/ia32/lithium-gap-resolver-ia32.cc',
- '../../src/ia32/lithium-gap-resolver-ia32.h',
- '../../src/ia32/lithium-ia32.cc',
- '../../src/ia32/lithium-ia32.h',
'../../src/ia32/macro-assembler-ia32.cc',
'../../src/ia32/macro-assembler-ia32.h',
- '../../src/ia32/regexp-macro-assembler-ia32.cc',
- '../../src/ia32/regexp-macro-assembler-ia32.h',
'../../src/compiler/ia32/code-generator-ia32.cc',
'../../src/compiler/ia32/instruction-codes-ia32.h',
+ '../../src/compiler/ia32/instruction-scheduler-ia32.cc',
'../../src/compiler/ia32/instruction-selector-ia32.cc',
- '../../src/compiler/ia32/linkage-ia32.cc',
+ '../../src/crankshaft/ia32/lithium-codegen-ia32.cc',
+ '../../src/crankshaft/ia32/lithium-codegen-ia32.h',
+ '../../src/crankshaft/ia32/lithium-gap-resolver-ia32.cc',
+ '../../src/crankshaft/ia32/lithium-gap-resolver-ia32.h',
+ '../../src/crankshaft/ia32/lithium-ia32.cc',
+ '../../src/crankshaft/ia32/lithium-ia32.h',
+ '../../src/debug/ia32/debug-ia32.cc',
+ '../../src/full-codegen/ia32/full-codegen-ia32.cc',
'../../src/ic/ia32/access-compiler-ia32.cc',
'../../src/ic/ia32/handler-compiler-ia32.cc',
'../../src/ic/ia32/ic-ia32.cc',
'../../src/ic/ia32/ic-compiler-ia32.cc',
'../../src/ic/ia32/stub-cache-ia32.cc',
+ '../../src/regexp/ia32/regexp-macro-assembler-ia32.cc',
+ '../../src/regexp/ia32/regexp-macro-assembler-ia32.h',
],
}],
['v8_target_arch=="x87"', {
@@ -1041,28 +1266,32 @@
'../../src/x87/codegen-x87.cc',
'../../src/x87/codegen-x87.h',
'../../src/x87/cpu-x87.cc',
- '../../src/x87/debug-x87.cc',
'../../src/x87/deoptimizer-x87.cc',
'../../src/x87/disasm-x87.cc',
'../../src/x87/frames-x87.cc',
'../../src/x87/frames-x87.h',
- '../../src/x87/full-codegen-x87.cc',
'../../src/x87/interface-descriptors-x87.cc',
- '../../src/x87/lithium-codegen-x87.cc',
- '../../src/x87/lithium-codegen-x87.h',
- '../../src/x87/lithium-gap-resolver-x87.cc',
- '../../src/x87/lithium-gap-resolver-x87.h',
- '../../src/x87/lithium-x87.cc',
- '../../src/x87/lithium-x87.h',
'../../src/x87/macro-assembler-x87.cc',
'../../src/x87/macro-assembler-x87.h',
- '../../src/x87/regexp-macro-assembler-x87.cc',
- '../../src/x87/regexp-macro-assembler-x87.h',
+ '../../src/compiler/x87/code-generator-x87.cc',
+ '../../src/compiler/x87/instruction-codes-x87.h',
+ '../../src/compiler/x87/instruction-scheduler-x87.cc',
+ '../../src/compiler/x87/instruction-selector-x87.cc',
+ '../../src/crankshaft/x87/lithium-codegen-x87.cc',
+ '../../src/crankshaft/x87/lithium-codegen-x87.h',
+ '../../src/crankshaft/x87/lithium-gap-resolver-x87.cc',
+ '../../src/crankshaft/x87/lithium-gap-resolver-x87.h',
+ '../../src/crankshaft/x87/lithium-x87.cc',
+ '../../src/crankshaft/x87/lithium-x87.h',
+ '../../src/debug/x87/debug-x87.cc',
+ '../../src/full-codegen/x87/full-codegen-x87.cc',
'../../src/ic/x87/access-compiler-x87.cc',
'../../src/ic/x87/handler-compiler-x87.cc',
'../../src/ic/x87/ic-x87.cc',
'../../src/ic/x87/ic-compiler-x87.cc',
'../../src/ic/x87/stub-cache-x87.cc',
+ '../../src/regexp/x87/regexp-macro-assembler-x87.cc',
+ '../../src/regexp/x87/regexp-macro-assembler-x87.h',
],
}],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
@@ -1078,36 +1307,37 @@
'../../src/mips/constants-mips.cc',
'../../src/mips/constants-mips.h',
'../../src/mips/cpu-mips.cc',
- '../../src/mips/debug-mips.cc',
'../../src/mips/deoptimizer-mips.cc',
'../../src/mips/disasm-mips.cc',
'../../src/mips/frames-mips.cc',
'../../src/mips/frames-mips.h',
- '../../src/mips/full-codegen-mips.cc',
'../../src/mips/interface-descriptors-mips.cc',
- '../../src/mips/lithium-codegen-mips.cc',
- '../../src/mips/lithium-codegen-mips.h',
- '../../src/mips/lithium-gap-resolver-mips.cc',
- '../../src/mips/lithium-gap-resolver-mips.h',
- '../../src/mips/lithium-mips.cc',
- '../../src/mips/lithium-mips.h',
'../../src/mips/macro-assembler-mips.cc',
'../../src/mips/macro-assembler-mips.h',
- '../../src/mips/regexp-macro-assembler-mips.cc',
- '../../src/mips/regexp-macro-assembler-mips.h',
'../../src/mips/simulator-mips.cc',
+ '../../src/mips/simulator-mips.h',
'../../src/compiler/mips/code-generator-mips.cc',
'../../src/compiler/mips/instruction-codes-mips.h',
+ '../../src/compiler/mips/instruction-scheduler-mips.cc',
'../../src/compiler/mips/instruction-selector-mips.cc',
- '../../src/compiler/mips/linkage-mips.cc',
+ '../../src/crankshaft/mips/lithium-codegen-mips.cc',
+ '../../src/crankshaft/mips/lithium-codegen-mips.h',
+ '../../src/crankshaft/mips/lithium-gap-resolver-mips.cc',
+ '../../src/crankshaft/mips/lithium-gap-resolver-mips.h',
+ '../../src/crankshaft/mips/lithium-mips.cc',
+ '../../src/crankshaft/mips/lithium-mips.h',
+ '../../src/full-codegen/mips/full-codegen-mips.cc',
+ '../../src/debug/mips/debug-mips.cc',
'../../src/ic/mips/access-compiler-mips.cc',
'../../src/ic/mips/handler-compiler-mips.cc',
'../../src/ic/mips/ic-mips.cc',
'../../src/ic/mips/ic-compiler-mips.cc',
'../../src/ic/mips/stub-cache-mips.cc',
+ '../../src/regexp/mips/regexp-macro-assembler-mips.cc',
+ '../../src/regexp/mips/regexp-macro-assembler-mips.h',
],
}],
- ['v8_target_arch=="mips64el"', {
+ ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
'sources': [ ### gcmole(arch:mips64el) ###
'../../src/mips64/assembler-mips64.cc',
'../../src/mips64/assembler-mips64.h',
@@ -1120,37 +1350,44 @@
'../../src/mips64/constants-mips64.cc',
'../../src/mips64/constants-mips64.h',
'../../src/mips64/cpu-mips64.cc',
- '../../src/mips64/debug-mips64.cc',
'../../src/mips64/deoptimizer-mips64.cc',
'../../src/mips64/disasm-mips64.cc',
'../../src/mips64/frames-mips64.cc',
'../../src/mips64/frames-mips64.h',
- '../../src/mips64/full-codegen-mips64.cc',
'../../src/mips64/interface-descriptors-mips64.cc',
- '../../src/mips64/lithium-codegen-mips64.cc',
- '../../src/mips64/lithium-codegen-mips64.h',
- '../../src/mips64/lithium-gap-resolver-mips64.cc',
- '../../src/mips64/lithium-gap-resolver-mips64.h',
- '../../src/mips64/lithium-mips64.cc',
- '../../src/mips64/lithium-mips64.h',
'../../src/mips64/macro-assembler-mips64.cc',
'../../src/mips64/macro-assembler-mips64.h',
- '../../src/mips64/regexp-macro-assembler-mips64.cc',
- '../../src/mips64/regexp-macro-assembler-mips64.h',
'../../src/mips64/simulator-mips64.cc',
+ '../../src/mips64/simulator-mips64.h',
'../../src/compiler/mips64/code-generator-mips64.cc',
'../../src/compiler/mips64/instruction-codes-mips64.h',
+ '../../src/compiler/mips64/instruction-scheduler-mips64.cc',
'../../src/compiler/mips64/instruction-selector-mips64.cc',
- '../../src/compiler/mips64/linkage-mips64.cc',
+ '../../src/crankshaft/mips64/lithium-codegen-mips64.cc',
+ '../../src/crankshaft/mips64/lithium-codegen-mips64.h',
+ '../../src/crankshaft/mips64/lithium-gap-resolver-mips64.cc',
+ '../../src/crankshaft/mips64/lithium-gap-resolver-mips64.h',
+ '../../src/crankshaft/mips64/lithium-mips64.cc',
+ '../../src/crankshaft/mips64/lithium-mips64.h',
+ '../../src/debug/mips64/debug-mips64.cc',
+ '../../src/full-codegen/mips64/full-codegen-mips64.cc',
'../../src/ic/mips64/access-compiler-mips64.cc',
'../../src/ic/mips64/handler-compiler-mips64.cc',
'../../src/ic/mips64/ic-mips64.cc',
'../../src/ic/mips64/ic-compiler-mips64.cc',
'../../src/ic/mips64/stub-cache-mips64.cc',
+ '../../src/regexp/mips64/regexp-macro-assembler-mips64.cc',
+ '../../src/regexp/mips64/regexp-macro-assembler-mips64.h',
],
}],
['v8_target_arch=="x64" or v8_target_arch=="x32"', {
'sources': [ ### gcmole(arch:x64) ###
+ '../../src/crankshaft/x64/lithium-codegen-x64.cc',
+ '../../src/crankshaft/x64/lithium-codegen-x64.h',
+ '../../src/crankshaft/x64/lithium-gap-resolver-x64.cc',
+ '../../src/crankshaft/x64/lithium-gap-resolver-x64.h',
+ '../../src/crankshaft/x64/lithium-x64.cc',
+ '../../src/crankshaft/x64/lithium-x64.h',
'../../src/x64/assembler-x64-inl.h',
'../../src/x64/assembler-x64.cc',
'../../src/x64/assembler-x64.h',
@@ -1160,36 +1397,74 @@
'../../src/x64/codegen-x64.cc',
'../../src/x64/codegen-x64.h',
'../../src/x64/cpu-x64.cc',
- '../../src/x64/debug-x64.cc',
'../../src/x64/deoptimizer-x64.cc',
'../../src/x64/disasm-x64.cc',
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
- '../../src/x64/full-codegen-x64.cc',
'../../src/x64/interface-descriptors-x64.cc',
- '../../src/x64/lithium-codegen-x64.cc',
- '../../src/x64/lithium-codegen-x64.h',
- '../../src/x64/lithium-gap-resolver-x64.cc',
- '../../src/x64/lithium-gap-resolver-x64.h',
- '../../src/x64/lithium-x64.cc',
- '../../src/x64/lithium-x64.h',
'../../src/x64/macro-assembler-x64.cc',
'../../src/x64/macro-assembler-x64.h',
- '../../src/x64/regexp-macro-assembler-x64.cc',
- '../../src/x64/regexp-macro-assembler-x64.h',
+ '../../src/debug/x64/debug-x64.cc',
+ '../../src/full-codegen/x64/full-codegen-x64.cc',
'../../src/ic/x64/access-compiler-x64.cc',
'../../src/ic/x64/handler-compiler-x64.cc',
'../../src/ic/x64/ic-x64.cc',
'../../src/ic/x64/ic-compiler-x64.cc',
'../../src/ic/x64/stub-cache-x64.cc',
+ '../../src/regexp/x64/regexp-macro-assembler-x64.cc',
+ '../../src/regexp/x64/regexp-macro-assembler-x64.h',
],
}],
['v8_target_arch=="x64"', {
'sources': [
'../../src/compiler/x64/code-generator-x64.cc',
'../../src/compiler/x64/instruction-codes-x64.h',
+ '../../src/compiler/x64/instruction-scheduler-x64.cc',
'../../src/compiler/x64/instruction-selector-x64.cc',
- '../../src/compiler/x64/linkage-x64.cc',
+ ],
+ }],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ '../../src/compiler/ppc/code-generator-ppc.cc',
+ '../../src/compiler/ppc/instruction-codes-ppc.h',
+ '../../src/compiler/ppc/instruction-scheduler-ppc.cc',
+ '../../src/compiler/ppc/instruction-selector-ppc.cc',
+ '../../src/crankshaft/ppc/lithium-ppc.cc',
+ '../../src/crankshaft/ppc/lithium-ppc.h',
+ '../../src/crankshaft/ppc/lithium-codegen-ppc.cc',
+ '../../src/crankshaft/ppc/lithium-codegen-ppc.h',
+ '../../src/crankshaft/ppc/lithium-gap-resolver-ppc.cc',
+ '../../src/crankshaft/ppc/lithium-gap-resolver-ppc.h',
+ '../../src/debug/ppc/debug-ppc.cc',
+ '../../src/full-codegen/ppc/full-codegen-ppc.cc',
+ '../../src/ic/ppc/access-compiler-ppc.cc',
+ '../../src/ic/ppc/handler-compiler-ppc.cc',
+ '../../src/ic/ppc/ic-ppc.cc',
+ '../../src/ic/ppc/ic-compiler-ppc.cc',
+ '../../src/ic/ppc/stub-cache-ppc.cc',
+ '../../src/ppc/assembler-ppc-inl.h',
+ '../../src/ppc/assembler-ppc.cc',
+ '../../src/ppc/assembler-ppc.h',
+ '../../src/ppc/builtins-ppc.cc',
+ '../../src/ppc/code-stubs-ppc.cc',
+ '../../src/ppc/code-stubs-ppc.h',
+ '../../src/ppc/codegen-ppc.cc',
+ '../../src/ppc/codegen-ppc.h',
+ '../../src/ppc/constants-ppc.h',
+ '../../src/ppc/constants-ppc.cc',
+ '../../src/ppc/cpu-ppc.cc',
+ '../../src/ppc/deoptimizer-ppc.cc',
+ '../../src/ppc/disasm-ppc.cc',
+ '../../src/ppc/frames-ppc.cc',
+ '../../src/ppc/frames-ppc.h',
+ '../../src/ppc/interface-descriptors-ppc.cc',
+ '../../src/ppc/interface-descriptors-ppc.h',
+ '../../src/ppc/macro-assembler-ppc.cc',
+ '../../src/ppc/macro-assembler-ppc.h',
+ '../../src/ppc/simulator-ppc.cc',
+ '../../src/ppc/simulator-ppc.h',
+ '../../src/regexp/ppc/regexp-macro-assembler-ppc.cc',
+ '../../src/regexp/ppc/regexp-macro-assembler-ppc.h',
],
}],
['OS=="win"', {
@@ -1197,6 +1472,10 @@
'gyp_generators': '<!(echo $GYP_GENERATORS)',
},
'msvs_disabled_warnings': [4351, 4355, 4800],
+ # When building Official, the .lib is too large and exceeds the 2G
+ # limit. This breaks it into multiple pieces to avoid the limit.
+ # See http://crbug.com/485155.
+ 'msvs_shard': 4,
}],
['component=="shared_library"', {
'defines': [
@@ -1248,12 +1527,16 @@
'../..',
],
'sources': [
+ '../../src/base/adapters.h',
'../../src/base/atomicops.h',
'../../src/base/atomicops_internals_arm64_gcc.h',
'../../src/base/atomicops_internals_arm_gcc.h',
'../../src/base/atomicops_internals_atomicword_compat.h',
'../../src/base/atomicops_internals_mac.h',
'../../src/base/atomicops_internals_mips_gcc.h',
+ '../../src/base/atomicops_internals_mips64_gcc.h',
+ '../../src/base/atomicops_internals_portable.h',
+ '../../src/base/atomicops_internals_ppc_gcc.h',
'../../src/base/atomicops_internals_tsan.h',
'../../src/base/atomicops_internals_x86_gcc.cc',
'../../src/base/atomicops_internals_x86_gcc.h',
@@ -1290,6 +1573,7 @@
'../../src/base/safe_conversions_impl.h',
'../../src/base/safe_math.h',
'../../src/base/safe_math_impl.h',
+ '../../src/base/smart-pointers.h',
'../../src/base/sys-info.cc',
'../../src/base/sys-info.h',
'../../src/base/utils/random-number-generator.cc',
@@ -1306,6 +1590,7 @@
['nacl_target_arch=="none"', {
'link_settings': {
'libraries': [
+ '-ldl',
'-lrt'
],
},
@@ -1325,6 +1610,20 @@
'sources': [
'../../src/base/platform/platform-posix.cc'
],
+ 'link_settings': {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ # Only include libdl and librt on host builds because they
+ # are included by default on Android target builds, and we
+ # don't want to re-include them here since this will change
+ # library order and break (see crbug.com/469973).
+ 'libraries': [
+ '-ldl',
+ '-lrt'
+ ]
+ }]
+ ]
+ },
'conditions': [
['host_os=="mac"', {
'target_conditions': [
@@ -1339,28 +1638,6 @@
}],
],
}, {
- # TODO(bmeurer): What we really want here, is this:
- #
- # 'link_settings': {
- # 'target_conditions': [
- # ['_toolset=="host"', {
- # 'libraries': [
- # '-lrt'
- # ]
- # }]
- # ]
- # },
- #
- # but we can't do this right now, as the AOSP does not support
- # linking against the host librt, so we need to work around this
- # for now, using the following hack (see platform/time.cc):
- 'target_conditions': [
- ['_toolset=="host"', {
- 'defines': [
- 'V8_LIBRT_NOT_AVAILABLE=1',
- ],
- }],
- ],
'sources': [
'../../src/base/platform/platform-linux.cc'
]
@@ -1439,6 +1716,12 @@
],
}
],
+ ['OS=="aix"', {
+ 'sources': [
+ '../../src/base/platform/platform-aix.cc',
+ '../../src/base/platform/platform-posix.cc'
+ ]},
+ ],
['OS=="solaris"', {
'link_settings': {
'libraries': [
@@ -1545,6 +1828,8 @@
'../../tools/concatenate-files.py',
'<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
],
'conditions': [
['want_separate_host_toolset==1', {
@@ -1595,7 +1880,7 @@
['v8_enable_i18n_support==1', {
'variables': {
'i18n_library_files': [
- '../../src/i18n.js',
+ '../../src/js/i18n.js',
],
},
}, {
@@ -1606,49 +1891,56 @@
],
'variables': {
'library_files': [
- '../../src/runtime.js',
- '../../src/v8natives.js',
- '../../src/symbol.js',
- '../../src/array.js',
- '../../src/string.js',
- '../../src/uri.js',
+ '../../src/js/macros.py',
+ '../../src/messages.h',
+ '../../src/js/prologue.js',
+ '../../src/js/runtime.js',
+ '../../src/js/v8natives.js',
+ '../../src/js/symbol.js',
+ '../../src/js/array.js',
+ '../../src/js/string.js',
+ '../../src/js/uri.js',
+ '../../src/js/math.js',
'../../src/third_party/fdlibm/fdlibm.js',
- '../../src/math.js',
- '../../src/apinatives.js',
- '../../src/date.js',
- '../../src/regexp.js',
- '../../src/arraybuffer.js',
- '../../src/typedarray.js',
- '../../src/generator.js',
- '../../src/object-observe.js',
- '../../src/collection.js',
- '../../src/weak-collection.js',
- '../../src/collection-iterator.js',
- '../../src/promise.js',
- '../../src/messages.js',
- '../../src/json.js',
- '../../src/array-iterator.js',
- '../../src/string-iterator.js',
- '../../src/debug-debugger.js',
- '../../src/mirror-debugger.js',
- '../../src/liveedit-debugger.js',
- '../../src/macros.py',
+ '../../src/js/regexp.js',
+ '../../src/js/arraybuffer.js',
+ '../../src/js/typedarray.js',
+ '../../src/js/iterator-prototype.js',
+ '../../src/js/generator.js',
+ '../../src/js/object-observe.js',
+ '../../src/js/collection.js',
+ '../../src/js/weak-collection.js',
+ '../../src/js/collection-iterator.js',
+ '../../src/js/promise.js',
+ '../../src/js/messages.js',
+ '../../src/js/json.js',
+ '../../src/js/array-iterator.js',
+ '../../src/js/string-iterator.js',
+ '../../src/js/templates.js',
+ '../../src/js/spread.js',
+ '../../src/debug/mirrors.js',
+ '../../src/debug/debug.js',
+ '../../src/debug/liveedit.js',
],
'experimental_library_files': [
- '../../src/macros.py',
- '../../src/proxy.js',
- '../../src/generator.js',
- '../../src/harmony-string.js',
- '../../src/harmony-array.js',
- '../../src/harmony-array-includes.js',
- '../../src/harmony-tostring.js',
- '../../src/harmony-typedarray.js',
- '../../src/harmony-classes.js',
- '../../src/harmony-templates.js',
- '../../src/harmony-regexp.js'
+ '../../src/js/macros.py',
+ '../../src/messages.h',
+ '../../src/js/proxy.js',
+ '../../src/js/generator.js',
+ '../../src/js/harmony-atomics.js',
+ '../../src/js/harmony-regexp.js',
+ '../../src/js/harmony-reflect.js',
+ '../../src/js/harmony-object-observe.js',
+ '../../src/js/harmony-sharedarraybuffer.js',
+ '../../src/js/harmony-simd.js',
+ '../../src/js/harmony-species.js',
+ '../../src/js/harmony-unicode-regexps.js',
+ '../../src/js/promise-extra.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
+ 'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+ 'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
},
'actions': [
{
@@ -1656,11 +1948,26 @@
'inputs': [
'../../tools/js2c.py',
'<@(library_files)',
- '<@(i18n_library_files)',
+ '<@(i18n_library_files)'
],
- 'outputs': [
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ 'CORE',
+ '<@(library_files)',
+ '<@(i18n_library_files)'
+ ],
+ },
+ {
+ 'action_name': 'js2c_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(library_files)',
+ '<@(i18n_library_files)'
],
+ 'outputs': ['<@(libraries_bin_file)'],
'action': [
'python',
'../../tools/js2c.py',
@@ -1668,14 +1975,8 @@
'CORE',
'<@(library_files)',
'<@(i18n_library_files)',
- ],
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'outputs': ['<@(libraries_bin_file)'],
- 'action': [
- '--startup_blob', '<@(libraries_bin_file)',
- ],
- }],
+ '--startup_blob', '<@(libraries_bin_file)',
+ '--nojs',
],
},
{
@@ -1684,23 +1985,96 @@
'../../tools/js2c.py',
'<@(experimental_library_files)',
],
- 'outputs': [
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ 'EXPERIMENTAL',
+ '<@(experimental_library_files)'
],
+ },
+ {
+ 'action_name': 'js2c_experimental_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(experimental_library_files)',
+ ],
+ 'outputs': ['<@(libraries_experimental_bin_file)'],
'action': [
'python',
'../../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'EXPERIMENTAL',
- '<@(experimental_library_files)'
+ '<@(experimental_library_files)',
+ '--startup_blob', '<@(libraries_experimental_bin_file)',
+ '--nojs',
],
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'outputs': ['<@(libraries_experimental_bin_file)'],
- 'action': [
- '--startup_blob', '<@(libraries_experimental_bin_file)'
- ],
- }],
+ },
+ {
+ 'action_name': 'js2c_extras',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(v8_extra_library_files)',
+ ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ 'EXTRAS',
+ '<@(v8_extra_library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_extras_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(v8_extra_library_files)',
+ ],
+ 'outputs': ['<@(libraries_extras_bin_file)'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ 'EXTRAS',
+ '<@(v8_extra_library_files)',
+ '--startup_blob', '<@(libraries_extras_bin_file)',
+ '--nojs',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental_extras',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ ],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'EXPERIMENTAL_EXTRAS',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental_extras_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ 'outputs': ['<@(libraries_experimental_extras_bin_file)'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'EXPERIMENTAL_EXTRAS',
+ '<@(v8_experimental_extra_library_files)',
+ '--startup_blob', '<@(libraries_experimental_extras_bin_file)',
+ '--nojs',
],
},
],
@@ -1741,7 +2115,7 @@
'../..',
],
'sources': [
- '../../src/mksnapshot.cc',
+ '../../src/snapshot/mksnapshot.cc',
],
'conditions': [
['v8_enable_i18n_support==1', {
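Each natives bucket (CORE, EXPERIMENTAL, EXTRAS, EXPERIMENTAL_EXTRAS) is now produced by two unconditional actions: one emits the C++ source array, the other emits the startup blob via --startup_blob/--nojs, replacing the previous conditional on v8_use_external_startup_data. A minimal sketch of the two invocations the CORE actions run, with illustrative paths standing in for the GYP variable expansions:

import subprocess

library_files = ["src/js/prologue.js", "src/js/runtime.js"]  # abbreviated list

# js2c: emit the C++ natives array only.
subprocess.check_call(
    ["python", "tools/js2c.py", "out/libraries.cc", "CORE"] + library_files)

# js2c_bin: same inputs, additionally writing the startup blob; --nojs
# explicitly keeps the default (non-JS) output mode.
subprocess.check_call(
    ["python", "tools/js2c.py", "out/libraries.cc", "CORE"] + library_files
    + ["--startup_blob", "out/libraries.bin", "--nojs"])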
diff --git a/tools/isolate_driver.py b/tools/isolate_driver.py
new file mode 100644
index 00000000..d1b39b09
--- /dev/null
+++ b/tools/isolate_driver.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Adaptor script called through build/isolate.gypi.
+
+Slimmed-down version of Chromium's isolate driver that doesn't process
+dynamic dependencies.
+"""
+
+import json
+import logging
+import os
+import subprocess
+import sys
+
+TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def prepare_isolate_call(args, output):
+ """Gathers all information required to run isolate.py later.
+
+ Dumps it as JSON to the |output| file.
+ """
+ with open(output, 'wb') as f:
+ json.dump({
+ 'args': args,
+ 'dir': os.getcwd(),
+ 'version': 1,
+ }, f, indent=2, sort_keys=True)
+
+
+def main():
+ logging.basicConfig(level=logging.ERROR, format='%(levelname)7s %(message)s')
+ if len(sys.argv) < 2:
+ print >> sys.stderr, 'Internal failure; mode required'
+ return 1
+ mode = sys.argv[1]
+ args = sys.argv[1:]
+ isolate = None
+ isolated = None
+ for i, arg in enumerate(args):
+ if arg == '--isolate':
+ isolate = i + 1
+ if arg == '--isolated':
+ isolated = i + 1
+ if not isolate or not isolated:
+ print >> sys.stderr, 'Internal failure'
+ return 1
+
+ # In 'prepare' mode, just collect all the information required for a
+ # postponed isolate.py invocation and store it in a *.isolated.gen.json
+ # file.
+ if mode == 'prepare':
+ prepare_isolate_call(args[1:], args[isolated] + '.gen.json')
+ return 0
+
+ swarming_client = os.path.join(TOOLS_DIR, 'swarming_client')
+ sys.stdout.flush()
+ return subprocess.call(
+ [sys.executable, os.path.join(swarming_client, 'isolate.py')] + args)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
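In 'prepare' mode nothing is executed; the driver only records enough state to replay isolate.py later. A sketch of the *.isolated.gen.json it writes, with illustrative values:

import json

record = {
    "args": ["check", "--isolate", "foo.isolate", "--isolated", "foo.isolated"],
    "dir": "/path/to/v8",  # os.getcwd() at prepare time
    "version": 1,
}
print(json.dumps(record, indent=2, sort_keys=True))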
diff --git a/tools/js2c.py b/tools/js2c.py
index 621ed5a2..d9151331 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -31,10 +31,9 @@
# char arrays. It is used for embedded JavaScript code in the V8
# library.
-import os, re, sys, string
+import os, re
import optparse
import jsmin
-import bz2
import textwrap
@@ -69,6 +68,9 @@ def ReadFile(filename):
EVAL_PATTERN = re.compile(r'\beval\s*\(')
WITH_PATTERN = re.compile(r'\bwith\s*\(')
+INVALID_ERROR_MESSAGE_PATTERN = re.compile(
+ r'Make(?!Generic)\w*Error\(([kA-Z]\w+)')
+NEW_ERROR_PATTERN = re.compile(r'new \$\w*Error\((?!\))')
def Validate(lines):
# Because of simplified context setup, eval and with is not
@@ -77,7 +79,11 @@ def Validate(lines):
raise Error("Eval disallowed in natives.")
if WITH_PATTERN.search(lines):
raise Error("With statements disallowed in natives.")
-
+ invalid_error = INVALID_ERROR_MESSAGE_PATTERN.search(lines)
+ if invalid_error:
+ raise Error("Unknown error message template '%s'" % invalid_error.group(1))
+ if NEW_ERROR_PATTERN.search(lines):
+ raise Error("Error constructed without message template.")
# Pass lines through unchanged.
return lines
@@ -101,6 +107,9 @@ def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
mapping = { }
def add_arg(str):
# Remember to expand recursively in the arguments
+ if arg_index[0] >= len(macro.args):
+ lineno = lines.count(os.linesep, 0, start) + 1
+ raise Error('line %s: Too many arguments for macro "%s"' % (lineno, name_pattern.pattern))
replacement = expander(str.strip())
mapping[macro.args[arg_index[0]]] = replacement
arg_index[0] += 1
@@ -151,7 +160,7 @@ class PythonMacro:
args.append(mapping[arg])
return str(self.fun(*args))
-CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
+CONST_PATTERN = re.compile(r'^define\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
@@ -188,6 +197,21 @@ def ReadMacros(lines):
raise Error("Illegal line: " + line)
return (constants, macros)
+
+TEMPLATE_PATTERN = re.compile(r'^\s+T\(([A-Z][a-zA-Z0-9]*),')
+
+def ReadMessageTemplates(lines):
+ templates = []
+ index = 0
+ for line in lines.split('\n'):
+ template_match = TEMPLATE_PATTERN.match(line)
+ if template_match:
+ name = "k%s" % template_match.group(1)
+ value = index
+ index = index + 1
+ templates.append((re.compile("\\b%s\\b" % name), value))
+ return templates
+
INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')
@@ -218,7 +242,7 @@ def ExpandInlineMacros(lines):
lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
-INLINE_CONSTANT_PATTERN = re.compile(r'const\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+)[;\n]')
+INLINE_CONSTANT_PATTERN = re.compile(r'define\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+);\n')
def ExpandInlineConstants(lines):
pos = 0
@@ -247,7 +271,7 @@ HEADER_TEMPLATE = """\
// javascript source files or the GYP script.
#include "src/v8.h"
-#include "src/natives.h"
+#include "src/snapshot/natives.h"
#include "src/utils.h"
namespace v8 {
@@ -311,21 +335,25 @@ GET_SCRIPT_NAME_CASE = """\
"""
-def BuildFilterChain(macro_filename):
+def BuildFilterChain(macro_filename, message_template_file):
"""Build the chain of filter functions to be applied to the sources.
Args:
macro_filename: Name of the macro file, if any.
Returns:
- A function (string -> string) that reads a source file and processes it.
+ A function (string -> string) that processes a source file.
"""
- filter_chain = [ReadFile]
+ filter_chain = []
if macro_filename:
(consts, macros) = ReadMacros(ReadFile(macro_filename))
- filter_chain.append(lambda l: ExpandConstants(l, consts))
filter_chain.append(lambda l: ExpandMacros(l, macros))
+ filter_chain.append(lambda l: ExpandConstants(l, consts))
+
+ if message_template_file:
+ message_templates = ReadMessageTemplates(ReadFile(message_template_file))
+ filter_chain.append(lambda l: ExpandConstants(l, message_templates))
filter_chain.extend([
RemoveCommentsAndTrailingWhitespace,
@@ -340,6 +368,8 @@ def BuildFilterChain(macro_filename):
return reduce(chain, filter_chain)
+def BuildExtraFilterChain():
+ return lambda x: RemoveCommentsAndTrailingWhitespace(Validate(x))
class Sources:
def __init__(self):
@@ -349,18 +379,25 @@ class Sources:
def IsDebuggerFile(filename):
- return filename.endswith("-debugger.js")
+ return "debug" in filename
def IsMacroFile(filename):
return filename.endswith("macros.py")
+def IsMessageTemplateFile(filename):
+ return filename.endswith("messages.h")
+
-def PrepareSources(source_files):
+def PrepareSources(source_files, native_type, emit_js):
"""Read, prepare and assemble the list of source files.
Args:
- sources: List of Javascript-ish source files. A file named macros.py
+ source_files: List of JavaScript-ish source files. A file named macros.py
will be treated as a list of macros.
+ native_type: String corresponding to a NativeType enum value, allowing us
+ to treat different types of sources differently.
+ emit_js: True if we should skip the byte conversion and just leave the
+ sources as JS strings.
Returns:
An instance of Sources.
@@ -372,26 +409,48 @@ def PrepareSources(source_files):
source_files.remove(macro_files[0])
macro_file = macro_files[0]
- filters = BuildFilterChain(macro_file)
+ message_template_file = None
+ message_template_files = filter(IsMessageTemplateFile, source_files)
+ assert len(message_template_files) in [0, 1]
+ if message_template_files:
+ source_files.remove(message_template_files[0])
+ message_template_file = message_template_files[0]
+
+ filters = None
+ if native_type in ("EXTRAS", "EXPERIMENTAL_EXTRAS"):
+ filters = BuildExtraFilterChain()
+ else:
+ filters = BuildFilterChain(macro_file, message_template_file)
# Sort 'debugger' sources first.
source_files = sorted(source_files,
lambda l,r: IsDebuggerFile(r) - IsDebuggerFile(l))
+ source_files_and_contents = [(f, ReadFile(f)) for f in source_files]
+
+ # Have a single not-quite-empty source file if there are none present;
+ # otherwise you get errors trying to compile an empty C++ array.
+ # It cannot be empty (or whitespace, which gets trimmed to empty), as
+ # the deserialization code assumes each file is nonempty.
+ if not source_files_and_contents:
+ source_files_and_contents = [("dummy.js", "(function() {})")]
+
result = Sources()
- for source in source_files:
+
+ for (source, contents) in source_files_and_contents:
try:
- lines = filters(source)
+ lines = filters(contents)
except Error as e:
raise Error("In file %s:\n%s" % (source, str(e)))
- result.modules.append(lines);
+ result.modules.append(lines)
is_debugger = IsDebuggerFile(source)
- result.is_debugger_id.append(is_debugger);
+ result.is_debugger_id.append(is_debugger)
name = os.path.basename(source)[:-3]
- result.names.append(name if not is_debugger else name[:-9]);
+ result.names.append(name)
+
return result
@@ -495,39 +554,50 @@ def WriteStartupBlob(sources, startup_blob):
output.close()
-def JS2C(source, target, native_type, raw_file, startup_blob):
- sources = PrepareSources(source)
- sources_bytes = "".join(sources.modules)
- metadata = BuildMetadata(sources, sources_bytes, native_type)
+def JS2C(sources, target, native_type, raw_file, startup_blob, emit_js):
+ prepared_sources = PrepareSources(sources, native_type, emit_js)
+ sources_output = "".join(prepared_sources.modules)
+ metadata = BuildMetadata(prepared_sources, sources_output, native_type)
# Optionally emit raw file.
if raw_file:
output = open(raw_file, "w")
- output.write(sources_bytes)
+ output.write(sources_output)
output.close()
if startup_blob:
- WriteStartupBlob(sources, startup_blob);
+ WriteStartupBlob(prepared_sources, startup_blob)
# Emit resulting source file.
output = open(target, "w")
- output.write(HEADER_TEMPLATE % metadata)
+ if emit_js:
+ output.write(sources_output)
+ else:
+ output.write(HEADER_TEMPLATE % metadata)
output.close()
def main():
parser = optparse.OptionParser()
- parser.add_option("--raw", action="store",
+ parser.add_option("--raw",
help="file to write the processed sources array to.")
- parser.add_option("--startup_blob", action="store",
+ parser.add_option("--startup_blob",
help="file to write the startup blob to.")
+ parser.add_option("--js",
+ help="writes a JS file output instead of a C file",
+ action="store_true", default=False, dest='js')
+ parser.add_option("--nojs", action="store_false", default=False, dest='js')
parser.set_usage("""js2c out.cc type sources.js ...
- out.cc: C code to be generated.
- type: type parameter for NativesCollection template.
- sources.js: JS internal sources or macros.py.""")
+ out.cc: C code to be generated.
+ type: type parameter for NativesCollection template.
+ sources.js: JS internal sources or macros.py.""")
(options, args) = parser.parse_args()
-
- JS2C(args[2:], args[0], args[1], options.raw, options.startup_blob)
+ JS2C(args[2:],
+ args[0],
+ args[1],
+ options.raw,
+ options.startup_blob,
+ options.js)
if __name__ == "__main__":
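The new message-template pass turns each T(Name, ...) entry of messages.h into a kName constant valued by its index, which ExpandConstants then substitutes into the JS sources; Validate correspondingly rejects Make*Error calls naming an unknown template. A minimal sketch of the round trip, using an abbreviated, illustrative messages.h excerpt:

import re

TEMPLATE_PATTERN = re.compile(r'^\s+T\(([A-Z][a-zA-Z0-9]*),')

messages_h = '  T(None, "")\n  T(CyclicProto, "Cyclic __proto__ value")'

templates = []
index = 0
for line in messages_h.split('\n'):
    match = TEMPLATE_PATTERN.match(line)
    if match:
        templates.append((re.compile(r'\bk%s\b' % match.group(1)), index))
        index += 1

source = 'throw MakeTypeError(kCyclicProto);'
for pattern, value in templates:
    source = pattern.sub(str(value), source)
print(source)  # -> throw MakeTypeError(1);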
diff --git a/tools/jsmin.py b/tools/jsmin.py
index 250dea9d..236f511d 100644
--- a/tools/jsmin.py
+++ b/tools/jsmin.py
@@ -100,6 +100,12 @@ class JavaScriptMinifier(object):
The string that should replace the match in the rewritten program.
"""
matched_text = m.group(0)
+
+ if matched_text.startswith("`") and matched_text.endswith("`"):
+ return re.sub(r"\$\{([\w$%]+)\}",
+ lambda m: '${' + self.FindNewName(m.group(1)) + '}',
+ matched_text)
+
if matched_text == "{":
self.Push()
return matched_text
@@ -152,6 +158,9 @@ class JavaScriptMinifier(object):
return self.map[var_name]
if self.nesting == 0:
return var_name
+ # Do not rename arguments object.
+ if var_name == 'arguments':
+ return 'arguments'
while True:
identifier_first_char = self.identifier_counter % 52
identifier_second_char = self.identifier_counter // 52
@@ -184,6 +193,8 @@ class JavaScriptMinifier(object):
return entire_match
if re.match(r'".*"$', entire_match):
return entire_match
+ if re.match(r"`.*`$", entire_match):
+ return entire_match
if re.match(r"/.+/$", entire_match):
return entire_match
return replacement
@@ -227,8 +238,10 @@ class JavaScriptMinifier(object):
# This regexp can handle embedded backslash-escaped characters including
# embedded backslash-escaped double quotes.
double_quoted_string = r'"(?:[^"\\]|\\.)*"'
- # A regexp that matches a literal string surrounded by 'double quotes'.
+ # A regexp that matches a literal string surrounded by 'single quotes'.
single_quoted_string = r"'(?:[^'\\]|\\.)*'"
+ # A regexp that matches a template string
+ template_string = r"`(?:[^`\\]|\\.)*`"
# A regexp that matches a regexp literal surrounded by /slashes/.
# Don't allow a regexp to have a ) before the first ( since that's a
# syntax error and it's probably just two unrelated slashes.
@@ -238,6 +251,7 @@ class JavaScriptMinifier(object):
# Replace multiple spaces with a single space.
line = re.sub("|".join([double_quoted_string,
single_quoted_string,
+ template_string,
slash_quoted_regexp,
"( )+"]),
self.RemoveSpaces,
@@ -246,6 +260,7 @@ class JavaScriptMinifier(object):
# and after the space. % and $ are counted as identifier characters.
line = re.sub("|".join([double_quoted_string,
single_quoted_string,
+ template_string,
slash_quoted_regexp,
r"(?<![a-zA-Z_0-9$%]) | (?![a-zA-Z_0-9$%])()"]),
self.RemoveSpaces,
@@ -269,6 +284,7 @@ class JavaScriptMinifier(object):
variable_use_regexp = r"(?<![.\w$%])[\w$%]+" + block_trailing_colon
line = re.sub("|".join([double_quoted_string,
single_quoted_string,
+ template_string,
slash_quoted_regexp,
r"\{", # Curly braces.
r"\}",
diff --git a/tools/ll_prof.py b/tools/ll_prof.py
index 409b3969..7dac2e05 100755
--- a/tools/ll_prof.py
+++ b/tools/ll_prof.py
@@ -568,7 +568,7 @@ PERF_EVENT_HEADER_DESC = Descriptor([
])
-# Reference: kernel/events/core.c
+# Reference: kernel/tools/perf/util/event.h
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
@@ -577,6 +577,20 @@ PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pgoff", "u64")
])
+# Reference: kernel/tools/perf/util/event.h
+PERF_MMAP2_EVENT_BODY_DESC = Descriptor([
+ ("pid", "u32"),
+ ("tid", "u32"),
+ ("addr", "u64"),
+ ("len", "u64"),
+ ("pgoff", "u64"),
+ ("maj", "u32"),
+ ("min", "u32"),
+ ("ino", "u64"),
+ ("ino_generation", "u64"),
+ ("prot", "u32"),
+ ("flags","u32")
+])
# perf_event_attr.sample_type bits control the set of
# perf_sample_event fields.
@@ -616,6 +630,7 @@ PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
PERF_RECORD_MMAP = 1
+PERF_RECORD_MMAP2 = 10
PERF_RECORD_SAMPLE = 9
@@ -664,6 +679,15 @@ class TraceReader(object):
mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
return mmap_info
+ def ReadMmap2(self, header, offset):
+ mmap_info = PERF_MMAP2_EVENT_BODY_DESC.Read(self.trace,
+ offset + self.header_size)
+ # Read null-terminated filename.
+ filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
+ offset + header.size]
+ mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
+ return mmap_info
+
def ReadSample(self, header, offset):
sample = self.sample_event_body_desc.Read(self.trace,
offset + self.header_size)
@@ -711,6 +735,23 @@ class LibraryRepo(object):
self.names = set()
self.ticks = {}
+
+ def HasDynamicSymbols(self, filename):
+ if filename.endswith(".ko"): return False
+ process = subprocess.Popen(
+ "%s -h %s" % (OBJDUMP_BIN, filename),
+ shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ pipe = process.stdout
+ try:
+ for line in pipe:
+ match = OBJDUMP_SECTION_HEADER_RE.match(line)
+ if match and match.group(1) == 'dynsym': return True
+ finally:
+ pipe.close()
+ assert process.wait() == 0, "Failed to objdump -h %s" % filename
+ return False
+
+
def Load(self, mmap_info, code_map, options):
# Skip kernel mmaps when requested using the fact that their tid
# is 0.
@@ -730,10 +771,10 @@ class LibraryRepo(object):
# Unfortunately, section headers span two lines, so we have to
# keep the just seen section name (from the first line in each
# section header) in the after_section variable.
- if mmap_info.filename.endswith(".ko"):
- dynamic_symbols = ""
- else:
+ if self.HasDynamicSymbols(mmap_info.filename):
dynamic_symbols = "-T"
+ else:
+ dynamic_symbols = ""
process = subprocess.Popen(
"%s -h -t %s -C %s" % (OBJDUMP_BIN, dynamic_symbols, mmap_info.filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@@ -956,6 +997,14 @@ if __name__ == "__main__":
else:
library_repo.Load(mmap_info, code_map, options)
mmap_time += time.time() - start
+ elif header.type == PERF_RECORD_MMAP2:
+ start = time.time()
+ mmap_info = trace_reader.ReadMmap2(header, offset)
+ if mmap_info.filename == HOST_ROOT + V8_GC_FAKE_MMAP:
+ log_reader.ReadUpToGC()
+ else:
+ library_repo.Load(mmap_info, code_map, options)
+ mmap_time += time.time() - start
elif header.type == PERF_RECORD_SAMPLE:
ticks += 1
start = time.time()
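PERF_RECORD_MMAP2 (type 10) carries the same mapping information as PERF_RECORD_MMAP plus device, inode and protection fields, so ReadMmap2 can hand its result to LibraryRepo.Load unchanged. A ctypes sketch of the body layout, assuming the Descriptor helper packs fields in declaration order:

import ctypes

class PerfMmap2Body(ctypes.Structure):
    # Mirrors PERF_MMAP2_EVENT_BODY_DESC; the NUL-terminated filename
    # follows this fixed-size part in the trace buffer.
    _fields_ = [
        ("pid", ctypes.c_uint32), ("tid", ctypes.c_uint32),
        ("addr", ctypes.c_uint64), ("len", ctypes.c_uint64),
        ("pgoff", ctypes.c_uint64), ("maj", ctypes.c_uint32),
        ("min", ctypes.c_uint32), ("ino", ctypes.c_uint64),
        ("ino_generation", ctypes.c_uint64),
        ("prot", ctypes.c_uint32), ("flags", ctypes.c_uint32),
    ]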
diff --git a/tools/logreader.js b/tools/logreader.js
index 5f0ec7f6..157a7fc8 100644
--- a/tools/logreader.js
+++ b/tools/logreader.js
@@ -35,15 +35,31 @@
*
* @param {Array.<Object>} dispatchTable A table used for parsing and processing
* log records.
+ * @param {boolean} timedRange Ignore ticks outside timed range.
+ * @param {boolean} pairwiseTimedRange Ignore ticks outside pairs of timer
+ * markers.
* @constructor
*/
-function LogReader(dispatchTable) {
+function LogReader(dispatchTable, timedRange, pairwiseTimedRange) {
/**
* @type {Array.<Object>}
*/
this.dispatchTable_ = dispatchTable;
/**
+ * @type {boolean}
+ */
+ this.timedRange_ = timedRange;
+
+ /**
+ * @type {boolean}
+ */
+ this.pairwiseTimedRange_ = pairwiseTimedRange;
+ if (pairwiseTimedRange) {
+ this.timedRange_ = true;
+ }
+
+ /**
* Current line.
* @type {number}
*/
@@ -54,6 +70,18 @@ function LogReader(dispatchTable) {
* @type {CsvParser}
*/
this.csvParser_ = new CsvParser();
+
+ /**
+ * Keeps track of whether we've seen a "current-time" tick yet.
+ * @type {boolean}
+ */
+ this.hasSeenTimerMarker_ = false;
+
+ /**
+ * List of log lines seen since last "current-time" tick.
+ * @type {Array.<String>}
+ */
+ this.logLinesSinceLastTimerMarker_ = [];
};
@@ -83,7 +111,28 @@ LogReader.prototype.processLogChunk = function(chunk) {
* @param {string} line A line of log.
*/
LogReader.prototype.processLogLine = function(line) {
- this.processLog_([line]);
+ if (!this.timedRange_) {
+ this.processLog_([line]);
+ return;
+ }
+ if (line.startsWith("current-time")) {
+ if (this.hasSeenTimerMarker_) {
+ this.processLog_(this.logLinesSinceLastTimerMarker_);
+ this.logLinesSinceLastTimerMarker_ = [];
+ // In pairwise mode, a "current-time" line ends the timed range.
+ if (this.pairwiseTimedRange_) {
+ this.hasSeenTimerMarker_ = false;
+ }
+ } else {
+ this.hasSeenTimerMarker_ = true;
+ }
+ } else {
+ if (this.hasSeenTimerMarker_) {
+ this.logLinesSinceLastTimerMarker_.push(line);
+ } else if (!line.startsWith("tick")) {
+ this.processLog_([line]);
+ }
+ }
};
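The timed-range modes buffer lines between "current-time" markers: outside a range, tick lines are dropped and everything else is processed immediately; inside, lines are queued until the next marker flushes them, and in pairwise mode that marker also closes the range. A Python sketch of the equivalent filtering:

def filter_timed_range(lines, pairwise=False):
    processed = []
    seen_marker = False
    buffered = []
    for line in lines:
        if line.startswith("current-time"):
            if seen_marker:
                processed.extend(buffered)  # a marker flushes the buffer
                buffered = []
                if pairwise:
                    seen_marker = False     # the second marker closes the range
            else:
                seen_marker = True
        elif seen_marker:
            buffered.append(line)
        elif not line.startswith("tick"):
            processed.append(line)          # non-tick lines always processed
    return processed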
diff --git a/tools/luci-go/linux64/isolate.sha1 b/tools/luci-go/linux64/isolate.sha1
new file mode 100644
index 00000000..c2821fca
--- /dev/null
+++ b/tools/luci-go/linux64/isolate.sha1
@@ -0,0 +1 @@
+32a3d49a4f7279ad022f346f7d960b2d58e2a0fe
\ No newline at end of file
diff --git a/tools/luci-go/mac64/isolate.sha1 b/tools/luci-go/mac64/isolate.sha1
new file mode 100644
index 00000000..fcb6c8fa
--- /dev/null
+++ b/tools/luci-go/mac64/isolate.sha1
@@ -0,0 +1 @@
+83306c575904ec92c1af9ccc67240d26069df337
\ No newline at end of file
diff --git a/tools/luci-go/win64/isolate.exe.sha1 b/tools/luci-go/win64/isolate.exe.sha1
new file mode 100644
index 00000000..032483cb
--- /dev/null
+++ b/tools/luci-go/win64/isolate.exe.sha1
@@ -0,0 +1 @@
+da358c2666ef9b89022e0eadf363cc6e123384e2
\ No newline at end of file
diff --git a/tools/ninja/ninja_output.py b/tools/ninja/ninja_output.py
new file mode 100644
index 00000000..ec4d27e0
--- /dev/null
+++ b/tools/ninja/ninja_output.py
@@ -0,0 +1,44 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import os
+import os.path
+
+
+def GetNinjaOutputDirectory(v8_root, configuration=None):
+ """Returns <v8_root>/<output_dir>/(Release|Debug).
+
+ The configuration chosen is the one most recently generated/built, but can be
+ overriden via the <configuration> parameter. Detects a custom output_dir
+ specified by GYP_GENERATOR_FLAGS."""
+
+ output_dir = 'out'
+ generator_flags = os.getenv('GYP_GENERATOR_FLAGS', '').split(' ')
+ for flag in generator_flags:
+ name_value = flag.split('=', 1)
+ if len(name_value) == 2 and name_value[0] == 'output_dir':
+ output_dir = name_value[1]
+
+ root = os.path.join(v8_root, output_dir)
+ if configuration:
+ return os.path.join(root, configuration)
+
+ debug_path = os.path.join(root, 'Debug')
+ release_path = os.path.join(root, 'Release')
+
+ def is_release_newer(test_path):
+ try:
+ debug_mtime = os.path.getmtime(os.path.join(debug_path, test_path))
+ except os.error:
+ debug_mtime = 0
+ try:
+ rel_mtime = os.path.getmtime(os.path.join(release_path, test_path))
+ except os.error:
+ rel_mtime = 0
+ return rel_mtime >= debug_mtime
+
+ if is_release_newer('.ninja_log') or is_release_newer('.ninja_deps'):
+ return release_path
+ return debug_path
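A hypothetical usage sketch (the import path depends on how the caller puts tools/ninja on sys.path):

import ninja_output  # assumes tools/ninja is on sys.path

newest = ninja_output.GetNinjaOutputDirectory("/path/to/v8")
release = ninja_output.GetNinjaOutputDirectory("/path/to/v8", "Release")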
diff --git a/tools/oom_dump/oom_dump.cc b/tools/oom_dump/oom_dump.cc
index 60e06853..581e1914 100644
--- a/tools/oom_dump/oom_dump.cc
+++ b/tools/oom_dump/oom_dump.cc
@@ -165,25 +165,23 @@ void DumpHeapStats(const char *minidump_file) {
const int new_space_size = READ_FIELD(1);
const int new_space_capacity = READ_FIELD(2);
- const int old_pointer_space_size = READ_FIELD(3);
- const int old_pointer_space_capacity = READ_FIELD(4);
- const int old_data_space_size = READ_FIELD(5);
- const int old_data_space_capacity = READ_FIELD(6);
- const int code_space_size = READ_FIELD(7);
- const int code_space_capacity = READ_FIELD(8);
- const int map_space_size = READ_FIELD(9);
- const int map_space_capacity = READ_FIELD(10);
- const int cell_space_size = READ_FIELD(11);
- const int cell_space_capacity = READ_FIELD(12);
- const int lo_space_size = READ_FIELD(13);
- const int global_handle_count = READ_FIELD(14);
- const int weak_global_handle_count = READ_FIELD(15);
- const int pending_global_handle_count = READ_FIELD(16);
- const int near_death_global_handle_count = READ_FIELD(17);
- const int destroyed_global_handle_count = READ_FIELD(18);
- const int memory_allocator_size = READ_FIELD(19);
- const int memory_allocator_capacity = READ_FIELD(20);
- const int os_error = READ_FIELD(23);
+ const int old_space_size = READ_FIELD(3);
+ const int old_space_capacity = READ_FIELD(4);
+ const int code_space_size = READ_FIELD(5);
+ const int code_space_capacity = READ_FIELD(6);
+ const int map_space_size = READ_FIELD(7);
+ const int map_space_capacity = READ_FIELD(8);
+ const int cell_space_size = READ_FIELD(9);
+ const int cell_space_capacity = READ_FIELD(10);
+ const int lo_space_size = READ_FIELD(11);
+ const int global_handle_count = READ_FIELD(12);
+ const int weak_global_handle_count = READ_FIELD(13);
+ const int pending_global_handle_count = READ_FIELD(14);
+ const int near_death_global_handle_count = READ_FIELD(15);
+ const int destroyed_global_handle_count = READ_FIELD(16);
+ const int memory_allocator_size = READ_FIELD(17);
+ const int memory_allocator_capacity = READ_FIELD(18);
+ const int os_error = READ_FIELD(19);
#undef READ_FIELD
int objects_per_type[v8::internal::LAST_TYPE + 1] = {0};
@@ -225,10 +223,8 @@ void DumpHeapStats(const char *minidump_file) {
printf("\t%-25s\t% 10.3f MB\n", #stat ":", toM(stat));
PRINT_MB_STAT(new_space_size);
PRINT_MB_STAT(new_space_capacity);
- PRINT_MB_STAT(old_pointer_space_size);
- PRINT_MB_STAT(old_pointer_space_capacity);
- PRINT_MB_STAT(old_data_space_size);
- PRINT_MB_STAT(old_data_space_capacity);
+ PRINT_MB_STAT(old_space_size);
+ PRINT_MB_STAT(old_space_capacity);
PRINT_MB_STAT(code_space_size);
PRINT_MB_STAT(code_space_capacity);
PRINT_MB_STAT(map_space_size);
diff --git a/tools/parser-shell.cc b/tools/parser-shell.cc
index 2cafc838..5d4b0cc4 100644
--- a/tools/parser-shell.cc
+++ b/tools/parser-shell.cc
@@ -36,15 +36,25 @@
#include "include/libplatform/libplatform.h"
#include "src/api.h"
#include "src/compiler.h"
-#include "src/scanner-character-streams.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparser.h"
#include "tools/shell-utils.h"
-#include "src/parser.h"
-#include "src/preparse-data-format.h"
-#include "src/preparse-data.h"
-#include "src/preparser.h"
using namespace v8::internal;
+class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t length) {
+ void* data = AllocateUninitialized(length);
+ return data == NULL ? data : memset(data, 0, length);
+ }
+ virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
+ virtual void Free(void* data, size_t) { free(data); }
+};
+
class StringResource8 : public v8::String::ExternalOneByteStringResource {
public:
StringResource8(const char* data, int length)
@@ -59,43 +69,49 @@ class StringResource8 : public v8::String::ExternalOneByteStringResource {
std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
const char* fname, Encoding encoding, int repeat, v8::Isolate* isolate,
- v8::Handle<v8::Context> context) {
+ v8::Local<v8::Context> context) {
int length = 0;
const byte* source = ReadFileAndRepeat(fname, &length, repeat);
- v8::Handle<v8::String> source_handle;
+ v8::Local<v8::String> source_handle;
switch (encoding) {
case UTF8: {
source_handle = v8::String::NewFromUtf8(
- isolate, reinterpret_cast<const char*>(source));
+ isolate, reinterpret_cast<const char*>(source),
+ v8::NewStringType::kNormal).ToLocalChecked();
break;
}
case UTF16: {
- source_handle = v8::String::NewFromTwoByte(
- isolate, reinterpret_cast<const uint16_t*>(source),
- v8::String::kNormalString, length / 2);
+ source_handle =
+ v8::String::NewFromTwoByte(
+ isolate, reinterpret_cast<const uint16_t*>(source),
+ v8::NewStringType::kNormal, length / 2).ToLocalChecked();
break;
}
case LATIN1: {
StringResource8* string_resource =
new StringResource8(reinterpret_cast<const char*>(source), length);
- source_handle = v8::String::NewExternal(isolate, string_resource);
+ source_handle = v8::String::NewExternalOneByte(isolate, string_resource)
+ .ToLocalChecked();
break;
}
}
v8::base::TimeDelta parse_time1, parse_time2;
- Handle<Script> script = Isolate::Current()->factory()->NewScript(
- v8::Utils::OpenHandle(*source_handle));
+ Handle<Script> script =
+ reinterpret_cast<i::Isolate*>(isolate)->factory()->NewScript(
+ v8::Utils::OpenHandle(*source_handle));
i::ScriptData* cached_data_impl = NULL;
// First round of parsing (produce data to cache).
{
- CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
- info.SetCachedData(&cached_data_impl,
- v8::ScriptCompiler::kProduceParserCache);
+ Zone zone;
+ ParseInfo info(&zone, script);
+ info.set_global();
+ info.set_cached_data(&cached_data_impl);
+ info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
v8::base::ElapsedTimer timer;
timer.Start();
// Allow lazy parsing; otherwise we won't produce cached data.
- bool success = Parser::Parse(&info, true);
+ info.set_allow_lazy_parsing();
+ bool success = Parser::ParseStatic(&info);
parse_time1 = timer.Elapsed();
if (!success) {
fprintf(stderr, "Parsing failed\n");
@@ -104,14 +120,16 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
}
// Second round of parsing (consume cached data).
{
- CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
- info.SetCachedData(&cached_data_impl,
- v8::ScriptCompiler::kConsumeParserCache);
+ Zone zone;
+ ParseInfo info(&zone, script);
+ info.set_global();
+ info.set_cached_data(&cached_data_impl);
+ info.set_compile_options(v8::ScriptCompiler::kConsumeParserCache);
v8::base::ElapsedTimer timer;
timer.Start();
// Allow lazy parsing; otherwise cached data won't help.
- bool success = Parser::Parse(&info, true);
+ info.set_allow_lazy_parsing();
+ bool success = Parser::ParseStatic(&info);
parse_time2 = timer.Elapsed();
if (!success) {
fprintf(stderr, "Parsing failed\n");
@@ -128,6 +146,8 @@ int main(int argc, char* argv[]) {
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
+ v8::V8::InitializeExternalStartupData(argv[0]);
+
Encoding encoding = LATIN1;
std::vector<std::string> fnames;
std::string benchmark;
@@ -148,11 +168,14 @@ int main(int argc, char* argv[]) {
fnames.push_back(std::string(argv[i]));
}
}
- v8::Isolate* isolate = v8::Isolate::New();
+ ArrayBufferAllocator array_buffer_allocator;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = &array_buffer_allocator;
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
DCHECK(!context.IsEmpty());
{
diff --git a/tools/parser-shell.gyp b/tools/parser-shell.gyp
index f0f0b8b6..77ed1eb2 100644
--- a/tools/parser-shell.gyp
+++ b/tools/parser-shell.gyp
@@ -50,6 +50,10 @@
'include_dirs+': [
'..',
],
+ 'defines': [
+ # TODO(jochen): Remove again after this is globally turned on.
+ 'V8_IMMINENT_DEPRECATION_WARNINGS',
+ ],
'sources': [
'parser-shell.cc',
'shell-utils.h',
diff --git a/tools/perf-to-html.py b/tools/perf-to-html.py
new file mode 100755
index 00000000..63faeb1d
--- /dev/null
+++ b/tools/perf-to-html.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+'''
+python %prog
+
+Convert a perf trybot JSON file into a pleasing HTML page. It can read
+from standard input or via the --filename option. Examples:
+
+ cat results.json | %prog --title "ia32 results"
+ %prog -f results.json -t "ia32 results" -o results.html
+'''
+
+import json
+import math
+from optparse import OptionParser
+import sys
+
+PERCENT_CONSIDERED_SIGNIFICANT = 0.5
+PROBABILITY_CONSIDERED_SIGNIFICANT = 0.02
+PROBABILITY_CONSIDERED_MEANINGLESS = 0.05
+
+
+def ComputeZ(baseline_avg, baseline_sigma, mean, n):
+ if baseline_sigma == 0:
+ return 1000.0
+ return abs((mean - baseline_avg) / (baseline_sigma / math.sqrt(n)))
+
+
+# Values from http://www.fourmilab.ch/rpkp/experiments/analysis/zCalc.html
+def ComputeProbability(z):
+ if z > 2.575829: # p 0.005: two sided < 0.01
+ return 0
+ if z > 2.326348: # p 0.010
+ return 0.01
+ if z > 2.170091: # p 0.015
+ return 0.02
+ if z > 2.053749: # p 0.020
+ return 0.03
+ if z > 1.959964: # p 0.025: two sided < 0.05
+ return 0.04
+ if z > 1.880793: # p 0.030
+ return 0.05
+ if z > 1.811910: # p 0.035
+ return 0.06
+ if z > 1.750686: # p 0.040
+ return 0.07
+ if z > 1.695397: # p 0.045
+ return 0.08
+ if z > 1.644853: # p 0.050: two sided < 0.10
+ return 0.09
+ if z > 1.281551: # p 0.100: two sided < 0.20
+ return 0.10
+ return 0.20 # two sided p >= 0.20
+
+
+class Result:
+ def __init__(self, test_name, count, hasScoreUnits, result, sigma,
+ master_result, master_sigma):
+ self.result_ = float(result)
+ self.sigma_ = float(sigma)
+ self.master_result_ = float(master_result)
+ self.master_sigma_ = float(master_sigma)
+ self.significant_ = False
+ self.notable_ = 0
+ self.percentage_string_ = ""
+ # compute notability and significance.
+ if hasScoreUnits:
+ compare_num = 100*self.result_/self.master_result_ - 100
+ else:
+ compare_num = 100*self.master_result_/self.result_ - 100
+ if abs(compare_num) > 0.1:
+ self.percentage_string_ = "%3.1f" % (compare_num)
+ z = ComputeZ(self.master_result_, self.master_sigma_, self.result_, count)
+ p = ComputeProbability(z)
+ if p < PROBABILITY_CONSIDERED_SIGNIFICANT:
+ self.significant_ = True
+ if compare_num >= PERCENT_CONSIDERED_SIGNIFICANT:
+ self.notable_ = 1
+ elif compare_num <= -PERCENT_CONSIDERED_SIGNIFICANT:
+ self.notable_ = -1
+
+ def result(self):
+ return self.result_
+
+ def sigma(self):
+ return self.sigma_
+
+ def master_result(self):
+ return self.master_result_
+
+ def master_sigma(self):
+ return self.master_sigma_
+
+ def percentage_string(self):
+ return self.percentage_string_
+
+ def isSignificant(self):
+ return self.significant_
+
+ def isNotablyPositive(self):
+ return self.notable_ > 0
+
+ def isNotablyNegative(self):
+ return self.notable_ < 0
+
+
+class Benchmark:
+ def __init__(self, name, data):
+ self.name_ = name
+ self.tests_ = {}
+ for test in data:
+ # strip off "<name>/" prefix
+ test_name = test.split("/")[1]
+ self.appendResult(test_name, data[test])
+
+ # tests is a dictionary of Results
+ def tests(self):
+ return self.tests_
+
+ def SortedTestKeys(self):
+ keys = self.tests_.keys()
+ keys.sort()
+ t = "Total"
+ if t in keys:
+ keys.remove(t)
+ keys.append(t)
+ return keys
+
+ def name(self):
+ return self.name_
+
+ def appendResult(self, test_name, test_data):
+ with_string = test_data["result with patch "]
+ data = with_string.split()
+ master_string = test_data["result without patch"]
+ master_data = master_string.split()
+ runs = int(test_data["runs"])
+ units = test_data["units"]
+ hasScoreUnits = units == "score"
+ self.tests_[test_name] = Result(test_name,
+ runs,
+ hasScoreUnits,
+ data[0], data[2],
+ master_data[0], master_data[2])
+
+
+class BenchmarkRenderer:
+ def __init__(self, output_file):
+ self.print_output_ = []
+ self.output_file_ = output_file
+
+ def Print(self, str_data):
+ self.print_output_.append(str_data)
+
+ def FlushOutput(self):
+ string_data = "\n".join(self.print_output_)
+ print_output = []
+ if self.output_file_:
+ # create a file
+ with open(self.output_file_, "w") as text_file:
+ text_file.write(string_data)
+ else:
+ print(string_data)
+
+ def RenderOneBenchmark(self, benchmark):
+ self.Print("<h2>")
+ self.Print("<a name=\"" + benchmark.name() + "\">")
+ self.Print(benchmark.name() + "</a> <a href=\"#top\">(top)</a>")
+ self.Print("</h2>");
+ self.Print("<table class=\"benchmark\">")
+ self.Print("<thead>")
+ self.Print(" <th>Test</th>")
+ self.Print(" <th>Result</th>")
+ self.Print(" <th>Master</th>")
+ self.Print(" <th>%</th>")
+ self.Print("</thead>")
+ self.Print("<tbody>")
+ tests = benchmark.tests()
+ for test in benchmark.SortedTestKeys():
+ t = tests[test]
+ self.Print(" <tr>")
+ self.Print(" <td>" + test + "</td>")
+ self.Print(" <td>" + str(t.result()) + "</td>")
+ self.Print(" <td>" + str(t.master_result()) + "</td>")
+ res = t.percentage_string()
+ if t.isSignificant():
+ res = self.bold(res)
+ if t.isNotablyPositive():
+ res = self.green(res)
+ elif t.isNotablyNegative():
+ res = self.red(res)
+ self.Print(" <td>" + res + "</td>")
+ self.Print(" </tr>")
+ self.Print("</tbody>")
+ self.Print("</table>")
+
+ def ProcessJSONData(self, data, title):
+ self.Print("<h1>" + title + "</h1>")
+ self.Print("<ul>")
+ for benchmark in data:
+ if benchmark != "errors":
+ self.Print("<li><a href=\"#" + benchmark + "\">" + benchmark + "</a></li>")
+ self.Print("</ul>")
+ for benchmark in data:
+ if benchmark != "errors":
+ benchmark_object = Benchmark(benchmark, data[benchmark])
+ self.RenderOneBenchmark(benchmark_object)
+
+ def bold(self, data):
+ return "<b>" + data + "</b>"
+
+ def red(self, data):
+ return "<font color=\"red\">" + data + "</font>"
+
+ def green(self, data):
+ return "<font color=\"green\">" + data + "</font>"
+
+ def PrintHeader(self):
+ data = """<html>
+<head>
+<title>Output</title>
+<style type="text/css">
+/*
+Style inspired by Andy Ferra's gist at https://gist.github.com/andyferra/2554919
+*/
+body {
+ font-family: Helvetica, arial, sans-serif;
+ font-size: 14px;
+ line-height: 1.6;
+ padding-top: 10px;
+ padding-bottom: 10px;
+ background-color: white;
+ padding: 30px;
+}
+h1, h2, h3, h4, h5, h6 {
+ margin: 20px 0 10px;
+ padding: 0;
+ font-weight: bold;
+ -webkit-font-smoothing: antialiased;
+ cursor: text;
+ position: relative;
+}
+h1 {
+ font-size: 28px;
+ color: black;
+}
+
+h2 {
+ font-size: 24px;
+ border-bottom: 1px solid #cccccc;
+ color: black;
+}
+
+h3 {
+ font-size: 18px;
+}
+
+h4 {
+ font-size: 16px;
+}
+
+h5 {
+ font-size: 14px;
+}
+
+h6 {
+ color: #777777;
+ font-size: 14px;
+}
+
+p, blockquote, ul, ol, dl, li, table, pre {
+ margin: 15px 0;
+}
+
+li p.first {
+ display: inline-block;
+}
+
+ul, ol {
+ padding-left: 30px;
+}
+
+ul :first-child, ol :first-child {
+ margin-top: 0;
+}
+
+ul :last-child, ol :last-child {
+ margin-bottom: 0;
+}
+
+table {
+ padding: 0;
+}
+
+table tr {
+ border-top: 1px solid #cccccc;
+ background-color: white;
+ margin: 0;
+ padding: 0;
+}
+
+table tr:nth-child(2n) {
+ background-color: #f8f8f8;
+}
+
+table tr th {
+ font-weight: bold;
+ border: 1px solid #cccccc;
+ text-align: left;
+ margin: 0;
+ padding: 6px 13px;
+}
+table tr td {
+ border: 1px solid #cccccc;
+ text-align: left;
+ margin: 0;
+ padding: 6px 13px;
+}
+table tr th :first-child, table tr td :first-child {
+ margin-top: 0;
+}
+table tr th :last-child, table tr td :last-child {
+ margin-bottom: 0;
+}
+</style>
+</head>
+<body>
+"""
+ self.Print(data)
+
+ def PrintFooter(self):
+ data = """</body>
+</html>
+"""
+ self.Print(data)
+
+
+def Render(opts, args):
+ if opts.filename:
+ with open(opts.filename) as json_data:
+ data = json.load(json_data)
+ else:
+ # load data from stdin
+ data = json.load(sys.stdin)
+
+ if opts.title:
+ title = opts.title
+ elif opts.filename:
+ title = opts.filename
+ else:
+ title = "Benchmark results"
+ renderer = BenchmarkRenderer(opts.output)
+ renderer.PrintHeader()
+ renderer.ProcessJSONData(data, title)
+ renderer.PrintFooter()
+ renderer.FlushOutput()
+
+
+if __name__ == '__main__':
+ parser = OptionParser(usage=__doc__)
+ parser.add_option("-f", "--filename", dest="filename",
+ help="Specifies the filename for the JSON results "
+ "rather than reading from stdin.")
+ parser.add_option("-t", "--title", dest="title",
+ help="Optional title of the web page.")
+ parser.add_option("-o", "--output", dest="output",
+ help="Write html output to this file rather than stdout.")
+
+ (opts, args) = parser.parse_args()
+ Render(opts, args)
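A hedged usage sketch for the script above, with hypothetical file names. The -f/-t/-o flags follow the OptionParser definitions; feeding it run_perf.py output via --json-test-results is an assumption about that script's interface:

    tools/run_perf.py --json-test-results results.json ...   # produce input (assumed flag)
    tools/perf-to-html.py -f results.json -t "My run" -o report.html
    cat results.json | tools/perf-to-html.py > report.html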
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 321d2910..99865690 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -35,6 +35,7 @@ except ImportError, e:
md5er = md5.new
+import json
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
@@ -45,65 +46,28 @@ import subprocess
import multiprocessing
from subprocess import PIPE
-# Disabled LINT rules and reason.
-# build/include_what_you_use: Started giving false positives for variables
-# named "string" and "map" assuming that you needed to include STL headers.
-
-ENABLED_LINT_RULES = """
-build/class
-build/deprecated
-build/endif_comment
-build/forward_decl
-build/include_alpha
-build/include_order
-build/printf_format
-build/storage_class
-legal/copyright
-readability/boost
-readability/braces
-readability/casting
-readability/constructors
-readability/fn_size
-readability/function
-readability/multiline_comment
-readability/multiline_string
-readability/streams
-readability/todo
-readability/utf8
-runtime/arrays
-runtime/casting
-runtime/deprecated_fn
-runtime/explicit
-runtime/int
-runtime/memset
-runtime/mutex
-runtime/nonconf
-runtime/printf
-runtime/printf_format
-runtime/rtti
-runtime/sizeof
-runtime/string
-runtime/virtual
-runtime/vlog
-whitespace/blank_line
-whitespace/braces
-whitespace/comma
-whitespace/comments
-whitespace/ending_newline
-whitespace/indent
-whitespace/labels
-whitespace/line_length
-whitespace/newline
-whitespace/operators
-whitespace/parens
-whitespace/tab
-whitespace/todo
-""".split()
+from testrunner.local import statusfile
+from testrunner.local import testsuite
+from testrunner.local import utils
+# Special LINT rules diverging from default and reason.
+# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
+# build/include_what_you_use: Started giving false positives for variables
+# named "string" and "map" assuming that you needed to include STL headers.
# TODO(bmeurer): Fix and re-enable readability/check
-LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
+LINT_RULES = """
+-build/header_guard
++build/include_alpha
+-build/include_what_you_use
+-build/namespaces
+-readability/check
++readability/streams
+-runtime/references
+""".split()
+LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
+FLAGS_LINE = re.compile("//\s*Flags:.*--([A-Za-z0-9-])+_[A-Za-z0-9].*\n")
def CppLintWorker(command):
try:
@@ -256,15 +220,15 @@ class CppLintProcessor(SourceFileProcessor):
print 'No changes in files detected. Skipping cpplint check.'
return True
- filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
- command = [sys.executable, 'cpplint.py', '--filter', filt]
+ filters = ",".join([n for n in LINT_RULES])
+ command = [sys.executable, 'cpplint.py', '--filter', filters]
cpplint = self.GetCpplintScript(join(path, "tools"))
if cpplint is None:
print('Could not find cpplint.py. Make sure '
'depot_tools is installed and in the path.')
sys.exit(1)
- command = [sys.executable, cpplint, '--filter', filt]
+ command = [sys.executable, cpplint, '--filter', filters]
commands = join([command + [file] for file in files])
count = multiprocessing.cpu_count()
@@ -341,9 +305,14 @@ class SourceProcessor(SourceFileProcessor):
'libraries-empty.cc',
'lua_binarytrees.js',
'memops.js',
+ 'poppler.js',
'primes.js',
'raytrace.js',
'regexp-pcre.js',
+ 'sqlite.js',
+ 'sqlite-change-heap.js',
+ 'sqlite-pointer-masking.js',
+ 'sqlite-safe-heap.js',
'gnuplot-4.6.3-emscripten.js',
'zlib.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
@@ -409,6 +378,12 @@ class SourceProcessor(SourceFileProcessor):
print "%s does not have two empty lines between declarations " \
"in line %s." % (name, linenumbers)
result = False
+ # Sanitize flags for fuzzer.
+ if "mjsunit" in name:
+ match = FLAGS_LINE.search(contents)
+ if match:
+ print "%s Flags should use '-' (not '_')" % name
+ result = False
return result
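The FLAGS_LINE check above rejects mjsunit flag comments that use underscores instead of dashes in flag names; a minimal self-contained illustration (sample lines hypothetical):

    import re
    FLAGS_LINE = re.compile(r"//\s*Flags:.*--([A-Za-z0-9-])+_[A-Za-z0-9].*\n")
    assert FLAGS_LINE.search("// Flags: --turbo_filter=*\n")      # underscore: flagged
    assert not FLAGS_LINE.search("// Flags: --turbo-filter=*\n")  # dash: accepted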
def ProcessFiles(self, files, path):
@@ -427,18 +402,94 @@ class SourceProcessor(SourceFileProcessor):
return success
-def CheckRuntimeVsNativesNameClashes(workspace):
- code = subprocess.call(
- [sys.executable, join(workspace, "tools", "check-name-clashes.py")])
- return code == 0
-
-
def CheckExternalReferenceRegistration(workspace):
code = subprocess.call(
[sys.executable, join(workspace, "tools", "external-reference-check.py")])
return code == 0
+def _CheckStatusFileForDuplicateKeys(filepath):
+ comma_space_bracket = re.compile(", *]")
+ lines = []
+ with open(filepath) as f:
+ for line in f.readlines():
+ # Skip all-comment lines.
+ if line.lstrip().startswith("#"): continue
+ # Strip away comments at the end of the line.
+ comment_start = line.find("#")
+ if comment_start != -1:
+ line = line[:comment_start]
+ line = line.strip()
+ # Strip away trailing commas within the line.
+ line = comma_space_bracket.sub("]", line)
+ if len(line) > 0:
+ lines.append(line)
+
+ # Strip away trailing commas at line ends. Ugh.
+ for i in range(len(lines) - 1):
+ if (lines[i].endswith(",") and len(lines[i + 1]) > 0 and
+ lines[i + 1][0] in ("}", "]")):
+ lines[i] = lines[i][:-1]
+
+ contents = "\n".join(lines)
+ # JSON wants double-quotes.
+ contents = contents.replace("'", '"')
+ # Fill in keywords (like PASS, SKIP).
+ for key in statusfile.KEYWORDS:
+ contents = re.sub(r"\b%s\b" % key, "\"%s\"" % key, contents)
+
+ status = {"success": True}
+ def check_pairs(pairs):
+ keys = {}
+ for key, value in pairs:
+ if key in keys:
+ print("%s: Error: duplicate key %s" % (filepath, key))
+ status["success"] = False
+ keys[key] = True
+
+ json.loads(contents, object_pairs_hook=check_pairs)
+ return status["success"]
+
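The duplicate-key detection above works because json.loads hands object_pairs_hook every key/value pair before deduplication; a minimal standalone version:

    import json

    def check_pairs(pairs):
        seen = {}
        for key, value in pairs:
            if key in seen:
                print("duplicate key: %s" % key)
            seen[key] = value
        return seen

    json.loads('{"a": 1, "a": 2}', object_pairs_hook=check_pairs)
    # prints: duplicate key: a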
+def CheckStatusFiles(workspace):
+ success = True
+ suite_paths = utils.GetSuitePaths(join(workspace, "test"))
+ for root in suite_paths:
+ suite_path = join(workspace, "test", root)
+ status_file_path = join(suite_path, root + ".status")
+ suite = testsuite.TestSuite.LoadTestSuite(suite_path)
+ if suite and exists(status_file_path):
+ success &= statusfile.PresubmitCheck(status_file_path)
+ success &= _CheckStatusFileForDuplicateKeys(status_file_path)
+ return success
+
+def CheckAuthorizedAuthor(input_api, output_api):
+ """For non-googler/chromites committers, verify the author's email address is
+ in AUTHORS.
+ """
+ # TODO(maruel): Add it to input_api?
+ import fnmatch
+
+ author = input_api.change.author_email
+ if not author:
+ input_api.logging.info('No author, skipping AUTHOR check')
+ return []
+ authors_path = input_api.os_path.join(
+ input_api.PresubmitLocalPath(), 'AUTHORS')
+ valid_authors = (
+ input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
+ for line in open(authors_path))
+ valid_authors = [item.group(1).lower() for item in valid_authors if item]
+ if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
+ input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
+ return [output_api.PresubmitPromptWarning(
+ ('%s is not in AUTHORS file. If you are a new contributor, please visit'
+ '\n'
+ 'http://www.chromium.org/developers/contributing-code and read the '
+ '"Legal" section\n'
+ 'If you are a chromite, verify the contributor signed the CLA.') %
+ author)]
+ return []
+
def GetOptions():
result = optparse.OptionParser()
result.add_option('--no-lint', help="Do not run cpplint", default=False,
@@ -453,12 +504,12 @@ def Main():
success = True
print "Running C++ lint check..."
if not options.no_lint:
- success = CppLintProcessor().Run(workspace) and success
+ success &= CppLintProcessor().Run(workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
- success = SourceProcessor().Run(workspace) and success
- success = CheckRuntimeVsNativesNameClashes(workspace) and success
- success = CheckExternalReferenceRegistration(workspace) and success
+ success &= SourceProcessor().Run(workspace)
+ success &= CheckExternalReferenceRegistration(workspace)
+ success &= CheckStatusFiles(workspace)
if success:
return 0
else:
diff --git a/tools/profile.js b/tools/profile.js
index a06cd3a5..f0814a2f 100644
--- a/tools/profile.js
+++ b/tools/profile.js
@@ -257,26 +257,28 @@ Profile.prototype.resolveAndFilterFuncs_ = function(stack) {
var entry = this.codeMap_.findEntry(stack[i]);
if (entry) {
var name = entry.getName();
- if (i == 0 && (entry.type == 'CPP' || entry.type == 'SHARED_LIB')) {
+ if (i === 0 && (entry.type === 'CPP' || entry.type === 'SHARED_LIB')) {
look_for_first_c_function = true;
}
- if (look_for_first_c_function) {
- if (entry.type == 'CPP') {
- last_seen_c_function = name;
- } else if (i > 0 && last_seen_c_function != '') {
- if (this.c_entries_[last_seen_c_function] === undefined) {
- this.c_entries_[last_seen_c_function] = 0;
- }
- this.c_entries_[last_seen_c_function]++;
- look_for_first_c_function = false; // Found it, we're done.
- }
+ if (look_for_first_c_function && entry.type === 'CPP') {
+ last_seen_c_function = name;
}
if (!this.skipThisFunction(name)) {
result.push(name);
}
} else {
- this.handleUnknownCode(
- Profile.Operation.TICK, stack[i], i);
+ this.handleUnknownCode(Profile.Operation.TICK, stack[i], i);
+ if (i === 0) result.push("UNKNOWN");
+ }
+ if (look_for_first_c_function &&
+ i > 0 &&
+ (!entry || entry.type !== 'CPP') &&
+ last_seen_c_function !== '') {
+ if (this.c_entries_[last_seen_c_function] === undefined) {
+ this.c_entries_[last_seen_c_function] = 0;
+ }
+ this.c_entries_[last_seen_c_function]++;
+ look_for_first_c_function = false; // Found it, we're done.
}
}
return result;
diff --git a/tools/push-to-trunk/auto_roll.py b/tools/push-to-trunk/auto_roll.py
deleted file mode 100755
index 1b57097c..00000000
--- a/tools/push-to-trunk/auto_roll.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import argparse
-import json
-import os
-import sys
-import urllib
-
-from common_includes import *
-import chromium_roll
-
-
-class CheckActiveRoll(Step):
- MESSAGE = "Check active roll."
-
- @staticmethod
- def ContainsChromiumRoll(changes):
- for change in changes:
- if change["subject"].startswith("Update V8 to"):
- return True
- return False
-
- def RunStep(self):
- params = {
- "closed": 3,
- "owner": self._options.author,
- "limit": 30,
- "format": "json",
- }
- params = urllib.urlencode(params)
- search_url = "https://codereview.chromium.org/search"
- result = self.ReadURL(search_url, params, wait_plan=[5, 20])
- if self.ContainsChromiumRoll(json.loads(result)["results"]):
- print "Stop due to existing Chromium roll."
- return True
-
-
-class DetectLastPush(Step):
- MESSAGE = "Detect commit ID of the last push to trunk."
-
- def RunStep(self):
- self.vc.Fetch()
- push_hash = self.FindLastTrunkPush(
- branch="origin/candidates", include_patches=True)
- self["last_push"] = self.GetCommitPositionNumber(push_hash)
-
-
-class DetectLastRoll(Step):
- MESSAGE = "Detect commit ID of the last Chromium roll."
-
- def RunStep(self):
- # Interpret the DEPS file to retrieve the v8 revision.
- # TODO(machenbach): This should be part of the roll-deps api of
- # depot_tools.
- Var = lambda var: '%s'
- exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
- last_roll = self.GetCommitPositionNumber(vars['v8_revision'])
- # FIXME(machenbach): When rolling from bleeding edge and from trunk there
- # may be different commit numbers here. Better use version?
- if int(last_roll) >= int(self["last_push"]):
- print("There is no newer v8 revision than the one in Chromium (%s)."
- % last_roll)
- return True
-
-
-class CheckClusterFuzz(Step):
- MESSAGE = "Check ClusterFuzz api for new problems."
-
- def RunStep(self):
- if not os.path.exists(self.Config("CLUSTERFUZZ_API_KEY_FILE")):
- print "Skipping ClusterFuzz check. No api key file found."
- return False
- api_key = FileToText(self.Config("CLUSTERFUZZ_API_KEY_FILE"))
- # Check for open, reproducible issues that have no associated bug.
- result = self._side_effect_handler.ReadClusterFuzzAPI(
- api_key, job_type="linux_asan_d8_dbg", reproducible="True",
- open="True", bug_information="",
- revision_greater_or_equal=str(self["last_push"]))
- if result:
- print "Stop due to pending ClusterFuzz issues."
- return True
-
-
-class RollChromium(Step):
- MESSAGE = "Roll V8 into Chromium."
-
- def RunStep(self):
- if self._options.roll:
- args = [
- "--author", self._options.author,
- "--reviewer", self._options.reviewer,
- "--chromium", self._options.chromium,
- "--use-commit-queue",
- ]
- if self._options.sheriff:
- args.extend([
- "--sheriff", "--googlers-mapping", self._options.googlers_mapping])
- if self._options.dry_run:
- args.extend(["--dry-run"])
- if self._options.work_dir:
- args.extend(["--work-dir", self._options.work_dir])
- self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
-
-
-class AutoRoll(ScriptsBase):
- def _PrepareOptions(self, parser):
- parser.add_argument("-c", "--chromium", required=True,
- help=("The path to your Chromium src/ "
- "directory to automate the V8 roll."))
- parser.add_argument("--roll", help="Call Chromium roll script.",
- default=False, action="store_true")
-
- def _ProcessOptions(self, options): # pragma: no cover
- if not options.reviewer:
- print "A reviewer (-r) is required."
- return False
- if not options.author:
- print "An author (-a) is required."
- return False
- return True
-
- def _Config(self):
- return {
- "PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
- "CLUSTERFUZZ_API_KEY_FILE": ".cf_api_key",
- }
-
- def _Steps(self):
- return [
- CheckActiveRoll,
- DetectLastPush,
- DetectLastRoll,
- CheckClusterFuzz,
- RollChromium,
- ]
-
-
-if __name__ == "__main__": # pragma: no cover
- sys.exit(AutoRoll().Run())
diff --git a/tools/push-to-trunk/bump_up_version.py b/tools/push-to-trunk/bump_up_version.py
deleted file mode 100755
index 647708c0..00000000
--- a/tools/push-to-trunk/bump_up_version.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Script for auto-increasing the version on bleeding_edge.
-
-The script can be run regularly by a cron job. It will increase the build
-level of the version on bleeding_edge if:
-- the lkgr version is smaller than the version of the latest revision,
-- the lkgr version is not a version change itself,
-- the tree is not closed for maintenance.
-
-The new version will be the maximum of the bleeding_edge and trunk versions +1.
-E.g. latest bleeding_edge version: 3.22.11.0 and latest trunk 3.23.0.0 gives
-the new version 3.23.1.0.
-
-This script requires a depot tools git checkout. I.e. 'fetch v8'.
-"""
-
-import argparse
-import os
-import sys
-
-from common_includes import *
-
-VERSION_BRANCH = "auto-bump-up-version"
-
-
-# TODO(machenbach): Add vc interface that works on git mirror.
-class Preparation(Step):
- MESSAGE = "Preparation."
-
- def RunStep(self):
- # TODO(machenbach): Remove after the git switch.
- if(self.Config("PERSISTFILE_BASENAME") ==
- "/tmp/v8-bump-up-version-tempfile"):
- print "This script is disabled until after the v8 git migration."
- return True
-
- # Check for a clean workdir.
- if not self.GitIsWorkdirClean(): # pragma: no cover
- # This is in case a developer runs this script on a dirty tree.
- self.GitStash()
-
- self.GitCheckout("master")
-
- self.GitPull()
-
- # Ensure a clean version branch.
- self.DeleteBranch(VERSION_BRANCH)
-
-
-class GetCurrentBleedingEdgeVersion(Step):
- MESSAGE = "Get latest bleeding edge version."
-
- def RunStep(self):
- self.GitCheckout("master")
-
- # Store latest version and revision.
- self.ReadAndPersistVersion()
- self["latest_version"] = self.ArrayToVersion("")
- self["latest"] = self.GitLog(n=1, format="%H")
- print "Bleeding edge version: %s" % self["latest_version"]
-
-
-# This step is pure paranoia. It forbids the script to continue if the last
-# commit changed version.cc. Just in case the other bailout has a bug, this
-# prevents the script from continuously committing version changes.
-class LastChangeBailout(Step):
- MESSAGE = "Stop script if the last change modified the version."
-
- def RunStep(self):
- if VERSION_FILE in self.GitChangedFiles(self["latest"]):
- print "Stop due to recent version change."
- return True
-
-
-# TODO(machenbach): Implement this for git.
-class FetchLKGR(Step):
- MESSAGE = "Fetching V8 LKGR."
-
- def RunStep(self):
- lkgr_url = "https://v8-status.appspot.com/lkgr"
- self["lkgr_svn"] = self.ReadURL(lkgr_url, wait_plan=[5])
-
-
-# TODO(machenbach): Implement this for git. With a git lkgr we could simply
-# checkout that revision. With svn, we have to search backwards until that
-# revision is found.
-class GetLKGRVersion(Step):
- MESSAGE = "Get bleeding edge lkgr version."
-
- def RunStep(self):
- self.GitCheckout("master")
- # If the commit was made from svn, there is a mapping entry in the commit
- # message.
- self["lkgr"] = self.GitLog(
- grep="^git-svn-id: [^@]*@%s [A-Za-z0-9-]*$" % self["lkgr_svn"],
- format="%H")
-
- # FIXME(machenbach): http://crbug.com/391712 can lead to svn lkgrs on the
- # trunk branch (rarely).
- if not self["lkgr"]: # pragma: no cover
- self.Die("No git hash found for svn lkgr.")
-
- self.GitCreateBranch(VERSION_BRANCH, self["lkgr"])
- self.ReadAndPersistVersion("lkgr_")
- self["lkgr_version"] = self.ArrayToVersion("lkgr_")
- print "LKGR version: %s" % self["lkgr_version"]
-
- # Ensure a clean version branch.
- self.GitCheckout("master")
- self.DeleteBranch(VERSION_BRANCH)
-
-
-class LKGRVersionUpToDateBailout(Step):
- MESSAGE = "Stop script if the lkgr has a renewed version."
-
- def RunStep(self):
- # If a version-change commit becomes the lkgr, don't bump up the version
- # again.
- if VERSION_FILE in self.GitChangedFiles(self["lkgr"]):
- print "Stop because the lkgr is a version change itself."
- return True
-
- # Don't bump up the version if it got updated already after the lkgr.
- if SortingKey(self["lkgr_version"]) < SortingKey(self["latest_version"]):
- print("Stop because the latest version already changed since the lkgr "
- "version.")
- return True
-
-
-class GetTrunkVersion(Step):
- MESSAGE = "Get latest trunk version."
-
- def RunStep(self):
- self.GitCheckout("candidates")
- self.GitPull()
- self.ReadAndPersistVersion("trunk_")
- self["trunk_version"] = self.ArrayToVersion("trunk_")
- print "Trunk version: %s" % self["trunk_version"]
-
-
-class CalculateVersion(Step):
- MESSAGE = "Calculate the new version."
-
- def RunStep(self):
- if self["lkgr_build"] == "9999": # pragma: no cover
- # If version control on bleeding edge was switched off, just use the last
- # trunk version.
- self["lkgr_version"] = self["trunk_version"]
-
- # The new version needs to be greater than the max on bleeding edge and
- # trunk.
- max_version = max(self["trunk_version"],
- self["lkgr_version"],
- key=SortingKey)
-
- # Strip off possible leading zeros.
- self["new_major"], self["new_minor"], self["new_build"], _ = (
- map(str, map(int, max_version.split("."))))
-
- self["new_build"] = str(int(self["new_build"]) + 1)
- self["new_patch"] = "0"
-
- self["new_version"] = ("%s.%s.%s.0" %
- (self["new_major"], self["new_minor"], self["new_build"]))
- print "New version is %s" % self["new_version"]
-
- if self._options.dry_run: # pragma: no cover
- print "Dry run, skipping version change."
- return True
-
-
-class CheckTreeStatus(Step):
- MESSAGE = "Checking v8 tree status message."
-
- def RunStep(self):
- status_url = "https://v8-status.appspot.com/current?format=json"
- status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
- message = json.loads(status_json)["message"]
- if re.search(r"maintenance|no commits", message, flags=re.I):
- print "Skip version change by tree status: \"%s\"" % message
- return True
-
-
-class ChangeVersion(Step):
- MESSAGE = "Bump up the version."
-
- def RunStep(self):
- self.GitCreateBranch(VERSION_BRANCH, "master")
-
- self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
-
- try:
- msg = "[Auto-roll] Bump up version to %s" % self["new_version"]
- self.GitCommit("%s\n\nTBR=%s" % (msg, self._options.author),
- author=self._options.author)
- self.GitUpload(author=self._options.author,
- force=self._options.force_upload,
- bypass_hooks=True)
- self.GitCLLand()
- print "Successfully changed the version."
- finally:
- # Clean up.
- self.GitCheckout("master")
- self.DeleteBranch(VERSION_BRANCH)
-
-
-class BumpUpVersion(ScriptsBase):
- def _PrepareOptions(self, parser):
- parser.add_argument("--dry_run", help="Don't commit the new version.",
- default=False, action="store_true")
-
- def _ProcessOptions(self, options): # pragma: no cover
- if not options.dry_run and not options.author:
- print "Specify your chromium.org email with -a"
- return False
- options.wait_for_lgtm = False
- options.force_readline_defaults = True
- options.force_upload = True
- return True
-
- def _Config(self):
- return {
- "PERSISTFILE_BASENAME": "/tmp/v8-bump-up-version-tempfile",
- "PATCH_FILE": "/tmp/v8-bump-up-version-tempfile-patch-file",
- }
-
- def _Steps(self):
- return [
- Preparation,
- GetCurrentBleedingEdgeVersion,
- LastChangeBailout,
- FetchLKGR,
- GetLKGRVersion,
- LKGRVersionUpToDateBailout,
- GetTrunkVersion,
- CalculateVersion,
- CheckTreeStatus,
- ChangeVersion,
- ]
-
-if __name__ == "__main__": # pragma: no cover
- sys.exit(BumpUpVersion().Run())
diff --git a/tools/push-to-trunk/chromium_roll.py b/tools/push-to-trunk/chromium_roll.py
deleted file mode 100755
index 5c9a38ec..00000000
--- a/tools/push-to-trunk/chromium_roll.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import argparse
-import os
-import sys
-
-from common_includes import *
-
-
-class Preparation(Step):
- MESSAGE = "Preparation."
-
- def RunStep(self):
- # Update v8 remote tracking branches.
- self.GitFetchOrigin()
-
-
-class DetectLastPush(Step):
- MESSAGE = "Detect commit ID of last push to trunk."
-
- def RunStep(self):
- self["last_push"] = self._options.last_push or self.FindLastTrunkPush(
- branch="origin/candidates", include_patches=True)
- self["push_title"] = self.GitLog(n=1, format="%s",
- git_hash=self["last_push"])
-
-
-class SwitchChromium(Step):
- MESSAGE = "Switch to Chromium checkout."
-
- def RunStep(self):
- self["v8_path"] = os.getcwd()
- cwd = self._options.chromium
- os.chdir(cwd)
- self.InitialEnvironmentChecks(cwd)
- # Check for a clean workdir.
- if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
- self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Assert that the DEPS file is there.
- if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
- self.Die("DEPS file not present.")
-
-
-class UpdateChromiumCheckout(Step):
- MESSAGE = "Update the checkout and create a new branch."
-
- def RunStep(self):
- self.GitCheckout("master", cwd=self._options.chromium)
- self.Command("gclient", "sync --nohooks", cwd=self._options.chromium)
- self.GitPull(cwd=self._options.chromium)
-
- # Update v8 remotes.
- self.GitFetchOrigin()
-
- self.GitCreateBranch("v8-roll-%s" % self["last_push"],
- cwd=self._options.chromium)
-
-
-class UploadCL(Step):
- MESSAGE = "Create and upload CL."
-
- def RunStep(self):
- # Patch DEPS file.
- if self.Command(
- "roll-dep", "v8 %s" % self["last_push"],
- cwd=self._options.chromium) is None:
- self.Die("Failed to create deps for %s" % self["last_push"])
-
- commit_title = "Update V8 to %s." % self["push_title"].lower()
- sheriff = ""
- if self["sheriff"]:
- sheriff = ("\n\nPlease reply to the V8 sheriff %s in case of problems."
- % self["sheriff"])
- self.GitCommit("%s%s\n\nTBR=%s" %
- (commit_title, sheriff, self._options.reviewer),
- author=self._options.author,
- cwd=self._options.chromium)
- if not self._options.dry_run:
- self.GitUpload(author=self._options.author,
- force=True,
- cq=self._options.use_commit_queue,
- cwd=self._options.chromium)
- print "CL uploaded."
- else:
- self.GitCheckout("master", cwd=self._options.chromium)
- self.GitDeleteBranch("v8-roll-%s" % self["last_push"],
- cwd=self._options.chromium)
- print "Dry run - don't upload."
-
-
-# TODO(machenbach): Make this obsolete. We are only in the chromium checkout
-# for the initial .git check.
-class SwitchV8(Step):
- MESSAGE = "Returning to V8 checkout."
-
- def RunStep(self):
- os.chdir(self["v8_path"])
-
-
-class CleanUp(Step):
- MESSAGE = "Done!"
-
- def RunStep(self):
- print("Congratulations, you have successfully rolled %s into "
- "Chromium. Please don't forget to update the v8rel spreadsheet."
- % self["last_push"])
-
- # Clean up all temporary files.
- Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
-
-
-class ChromiumRoll(ScriptsBase):
- def _PrepareOptions(self, parser):
- parser.add_argument("-c", "--chromium", required=True,
- help=("The path to your Chromium src/ "
- "directory to automate the V8 roll."))
- parser.add_argument("-l", "--last-push",
- help="The git commit ID of the last push to trunk.")
- parser.add_argument("--use-commit-queue",
- help="Check the CQ bit on upload.",
- default=False, action="store_true")
-
- def _ProcessOptions(self, options): # pragma: no cover
- if not options.author or not options.reviewer:
- print "A reviewer (-r) and an author (-a) are required."
- return False
-
- options.requires_editor = False
- options.force = True
- options.manual = False
- return True
-
- def _Config(self):
- return {
- "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
- }
-
- def _Steps(self):
- return [
- Preparation,
- DetectLastPush,
- DetermineV8Sheriff,
- SwitchChromium,
- UpdateChromiumCheckout,
- UploadCL,
- SwitchV8,
- CleanUp,
- ]
-
-
-if __name__ == "__main__": # pragma: no cover
- sys.exit(ChromiumRoll().Run())
diff --git a/tools/push-to-trunk/generate_version.py b/tools/push-to-trunk/generate_version.py
deleted file mode 100755
index b4a0221e..00000000
--- a/tools/push-to-trunk/generate_version.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Script to set v8's version file to the version given by the latest tag.
-"""
-
-
-import os
-import re
-import subprocess
-import sys
-
-
-CWD = os.path.abspath(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-VERSION_CC = os.path.join(CWD, "src", "version.cc")
-
-def main():
- tag = subprocess.check_output(
- "git describe --tags",
- shell=True,
- cwd=CWD,
- ).strip()
- assert tag
-
- # Check for commits not exactly matching a tag. Those are candidate builds
- # for the next version. The output has the form
- # <tag name>-<n commits>-<hash>.
- if "-" in tag:
- version = tag.split("-")[0]
- candidate = "1"
- else:
- version = tag
- candidate = "0"
- version_levels = version.split(".")
-
- # Set default patch level if none is given.
- if len(version_levels) == 3:
- version_levels.append("0")
- assert len(version_levels) == 4
-
- major, minor, build, patch = version_levels
-
- # Increment build level for candidate builds.
- if candidate == "1":
- build = str(int(build) + 1)
- patch = "0"
-
- # Modify version.cc with the new values.
- with open(VERSION_CC, "r") as f:
- text = f.read()
- output = []
- for line in text.split("\n"):
- for definition, substitute in (
- ("MAJOR_VERSION", major),
- ("MINOR_VERSION", minor),
- ("BUILD_NUMBER", build),
- ("PATCH_LEVEL", patch),
- ("IS_CANDIDATE_VERSION", candidate)):
- if line.startswith("#define %s" % definition):
- line = re.sub("\d+$", substitute, line)
- output.append(line)
- with open(VERSION_CC, "w") as f:
- f.write("\n".join(output))
-
- # Log what was done.
- candidate_txt = " (candidate)" if candidate == "1" else ""
- patch_txt = ".%s" % patch if patch != "0" else ""
- version_txt = ("%s.%s.%s%s%s" %
- (major, minor, build, patch_txt, candidate_txt))
- print "Modified version.cc. Set V8 version to %s" % version_txt
- return 0
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/tools/push-to-trunk/auto_push.py b/tools/release/auto_push.py
index 34afa4aa..ca9e5e87 100755
--- a/tools/push-to-trunk/auto_push.py
+++ b/tools/release/auto_push.py
@@ -34,75 +34,44 @@ import sys
import urllib
from common_includes import *
-import push_to_trunk
+import create_release
-PUSH_MESSAGE_RE = re.compile(r".* \(based on ([a-fA-F0-9]+)\)$")
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
- self.InitialEnvironmentChecks(self.default_cwd)
- self.CommonPrepare()
-
-
-class CheckAutoPushSettings(Step):
- MESSAGE = "Checking settings file."
-
- def RunStep(self):
- settings_file = os.path.realpath(self.Config("SETTINGS_LOCATION"))
- if os.path.exists(settings_file):
- settings_dict = json.loads(FileToText(settings_file))
- if settings_dict.get("enable_auto_roll") is False:
- self.Die("Push to trunk disabled by auto-roll settings file: %s"
- % settings_file)
-
-
-class CheckTreeStatus(Step):
- MESSAGE = "Checking v8 tree status message."
-
- def RunStep(self):
- status_url = "https://v8-status.appspot.com/current?format=json"
- status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
- self["tree_message"] = json.loads(status_json)["message"]
- if re.search(r"nopush|no push", self["tree_message"], flags=re.I):
- self.Die("Push to trunk disabled by tree state: %s"
- % self["tree_message"])
+ # Fetch unfetched revisions.
+ self.vc.Fetch()
class FetchCandidate(Step):
- MESSAGE = "Fetching V8 roll candidate ref."
+ MESSAGE = "Fetching V8 lkgr ref."
def RunStep(self):
- self.Git("fetch origin +refs/heads/candidate:refs/heads/candidate")
- self["candidate"] = self.Git("show-ref -s refs/heads/candidate").strip()
+ # The roll ref points to the candidate to be rolled.
+ self.Git("fetch origin +refs/heads/lkgr:refs/heads/lkgr")
+ self["candidate"] = self.Git("show-ref -s refs/heads/lkgr").strip()
-class CheckLastPush(Step):
- MESSAGE = "Checking last V8 push to trunk."
+class LastReleaseBailout(Step):
+ MESSAGE = "Checking last V8 release base."
def RunStep(self):
- last_push = self.FindLastTrunkPush()
-
- # Retrieve the bleeding edge revision of the last push from the text in
- # the push commit message.
- last_push_title = self.GitLog(n=1, format="%s", git_hash=last_push)
- last_push_be = PUSH_MESSAGE_RE.match(last_push_title).group(1)
-
- if not last_push_be: # pragma: no cover
- self.Die("Could not retrieve bleeding edge revision for trunk push %s"
- % last_push)
+ last_release = self.GetLatestReleaseBase()
+ commits = self.GitLog(
+ format="%H", git_hash="%s..%s" % (last_release, self["candidate"]))
- if self["candidate"] == last_push_be:
- print "Already pushed current candidate %s" % last_push_be
+ if not commits:
+ print "Already pushed current candidate %s" % self["candidate"]
return True
-class PushToCandidates(Step):
- MESSAGE = "Pushing to candidates if specified."
+class CreateRelease(Step):
+ MESSAGE = "Creating release if specified."
def RunStep(self):
- print "Pushing candidate %s to candidates." % self["candidate"]
+ print "Creating release for %s." % self["candidate"]
args = [
"--author", self._options.author,
@@ -114,15 +83,15 @@ class PushToCandidates(Step):
if self._options.work_dir:
args.extend(["--work-dir", self._options.work_dir])
- # TODO(machenbach): Update the script before calling it.
if self._options.push:
- self._side_effect_handler.Call(push_to_trunk.PushToTrunk().Run, args)
+ self._side_effect_handler.Call(
+ create_release.CreateRelease().Run, args)
class AutoPush(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-p", "--push",
- help="Push to trunk. Dry run if unspecified.",
+ help="Create release. Dry run if unspecified.",
default=False, action="store_true")
def _ProcessOptions(self, options):
@@ -135,17 +104,14 @@ class AutoPush(ScriptsBase):
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-auto-push-tempfile",
- "SETTINGS_LOCATION": "~/.auto-roll",
}
def _Steps(self):
return [
Preparation,
- CheckAutoPushSettings,
- CheckTreeStatus,
FetchCandidate,
- CheckLastPush,
- PushToCandidates,
+ LastReleaseBailout,
+ CreateRelease,
]
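The LastReleaseBailout step above reduces to an empty-range check in git; a hedged standalone equivalent (arguments hypothetical, to be run inside a checkout):

    import subprocess

    def has_new_commits(last_release, candidate):
        # "A..B" lists commits reachable from B but not from A; empty output
        # means the candidate has already been released.
        out = subprocess.check_output(
            ["git", "log", "--format=%H", "%s..%s" % (last_release, candidate)])
        return bool(out.strip())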
diff --git a/tools/release/auto_roll.py b/tools/release/auto_roll.py
new file mode 100755
index 00000000..27fd3709
--- /dev/null
+++ b/tools/release/auto_roll.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import sys
+
+from common_includes import *
+
+ROLL_SUMMARY = ("Summary of changes available at:\n"
+ "https://chromium.googlesource.com/v8/v8/+log/%s..%s")
+
+ISSUE_MSG = (
+"""Please follow these instructions for assigning/CC'ing issues:
+https://github.com/v8/v8/wiki/Triaging%20issues
+
+Please close rolling in case of a roll revert:
+https://v8-roll.appspot.com/
+This only works with a Google account.""")
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'preparation'
+ # Update v8 remote tracking branches.
+ self.GitFetchOrigin()
+ self.Git("fetch origin +refs/tags/*:refs/tags/*")
+
+
+class DetectLastRoll(Step):
+ MESSAGE = "Detect commit ID of the last Chromium roll."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'detect_last_roll'
+ self["last_roll"] = self._options.last_roll
+ if not self["last_roll"]:
+ # Interpret the DEPS file to retrieve the v8 revision.
+ # TODO(machenbach): This should be part of the roll-deps api of
+ # depot_tools.
+ Var = lambda var: '%s'
+ exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
+
+ # The revision rolled last.
+ self["last_roll"] = vars['v8_revision']
+ self["last_version"] = self.GetVersionTag(self["last_roll"])
+ assert self["last_version"], "The last rolled v8 revision is not tagged."
+
+
+class DetectRevisionToRoll(Step):
+ MESSAGE = "Detect commit ID of the V8 revision to roll."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'detect_revision'
+ self["roll"] = self._options.revision
+ if self["roll"]:
+ # If the revision was passed on the cmd line, continue script execution
+ # in the next step.
+ return False
+
+ # The revision that should be rolled. Check for the latest of the most
+ # recent releases based on commit timestamp.
+ revisions = self.GetRecentReleases(
+ max_age=self._options.max_age * DAY_IN_SECONDS)
+ assert revisions, "Didn't find any recent release."
+
+ # There must be some progress between the last roll and the new candidate
+ # revision (i.e. we don't go backwards). The revisions are ordered newest
+ # to oldest. It is possible that the newest timestamp has no progress
+ # compared to the last roll, i.e. if the newest release is a cherry-pick
+ # on a release branch. Then we look further.
+ for revision in revisions:
+ version = self.GetVersionTag(revision)
+ assert version, "Internal error. All recent releases should have a tag"
+
+ if SortingKey(self["last_version"]) < SortingKey(version):
+ self["roll"] = revision
+ break
+ else:
+ print("There is no newer v8 revision than the one in Chromium (%s)."
+ % self["last_roll"])
+ self['json_output']['monitoring_state'] = 'up_to_date'
+ return True
+
+
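The loop in DetectRevisionToRoll above uses Python's for/else: the else branch runs only when the loop finishes without break, i.e. when none of the recent releases is newer than the last roll. A minimal demonstration:

    for candidate in [3, 2, 1]:
        if candidate > 10:  # no element satisfies this
            break
    else:
        print("no newer candidate found")  # reached: the loop never broke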
+class PrepareRollCandidate(Step):
+ MESSAGE = "Robustness checks of the roll candidate."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'prepare_candidate'
+ self["roll_title"] = self.GitLog(n=1, format="%s",
+ git_hash=self["roll"])
+
+ # Make sure the last roll and the roll candidate are releases.
+ version = self.GetVersionTag(self["roll"])
+ assert version, "The revision to roll is not tagged."
+ version = self.GetVersionTag(self["last_roll"])
+ assert version, "The revision used as last roll is not tagged."
+
+
+class SwitchChromium(Step):
+ MESSAGE = "Switch to Chromium checkout."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'switch_chromium'
+ cwd = self._options.chromium
+ self.InitialEnvironmentChecks(cwd)
+ # Check for a clean workdir.
+ if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
+ self.Die("Workspace is not clean. Please commit or undo your changes.")
+ # Assert that the DEPS file is there.
+ if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
+ self.Die("DEPS file not present.")
+
+
+class UpdateChromiumCheckout(Step):
+ MESSAGE = "Update the checkout and create a new branch."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'update_chromium'
+ cwd = self._options.chromium
+ self.GitCheckout("master", cwd=cwd)
+ self.DeleteBranch("work-branch", cwd=cwd)
+ self.Command("gclient", "sync --nohooks", cwd=cwd)
+ self.GitPull(cwd=cwd)
+
+ # Update v8 remotes.
+ self.GitFetchOrigin()
+
+ self.GitCreateBranch("work-branch", cwd=cwd)
+
+
+class UploadCL(Step):
+ MESSAGE = "Create and upload CL."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'upload'
+ cwd = self._options.chromium
+ # Patch DEPS file.
+ if self.Command("roll-dep-svn", "v8 %s" %
+ self["roll"], cwd=cwd) is None:
+ self.Die("Failed to create deps for %s" % self["roll"])
+
+ message = []
+ message.append("Update V8 to %s." % self["roll_title"].lower())
+
+ message.append(
+ ROLL_SUMMARY % (self["last_roll"][:8], self["roll"][:8]))
+
+ message.append(ISSUE_MSG)
+
+ message.append("TBR=%s" % self._options.reviewer)
+ self.GitCommit("\n\n".join(message), author=self._options.author, cwd=cwd)
+ if not self._options.dry_run:
+ self.GitUpload(author=self._options.author,
+ force=True,
+ cq=self._options.use_commit_queue,
+ cwd=cwd)
+ print "CL uploaded."
+ else:
+ print "Dry run - don't upload."
+
+ self.GitCheckout("master", cwd=cwd)
+ self.GitDeleteBranch("work-branch", cwd=cwd)
+
+class CleanUp(Step):
+ MESSAGE = "Done!"
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'success'
+ print("Congratulations, you have successfully rolled %s into "
+ "Chromium."
+ % self["roll"])
+
+ # Clean up all temporary files.
+ Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
+
+
+class AutoRoll(ScriptsBase):
+ def _PrepareOptions(self, parser):
+ parser.add_argument("-c", "--chromium", required=True,
+ help=("The path to your Chromium src/ "
+ "directory to automate the V8 roll."))
+ parser.add_argument("--last-roll",
+ help="The git commit ID of the last rolled version. "
+ "Auto-detected if not specified.")
+ parser.add_argument("--max-age", default=7, type=int,
+ help="Maximum age in days of the latest release.")
+ parser.add_argument("--revision",
+ help="Revision to roll. Auto-detected if not "
+ "specified."),
+ parser.add_argument("--roll", help="Deprecated.",
+ default=True, action="store_true")
+ parser.add_argument("--use-commit-queue",
+ help="Check the CQ bit on upload.",
+ default=True, action="store_true")
+
+ def _ProcessOptions(self, options): # pragma: no cover
+ if not options.author or not options.reviewer:
+ print "A reviewer (-r) and an author (-a) are required."
+ return False
+
+ options.requires_editor = False
+ options.force = True
+ options.manual = False
+ return True
+
+ def _Config(self):
+ return {
+ "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
+ }
+
+ def _Steps(self):
+ return [
+ Preparation,
+ DetectLastRoll,
+ DetectRevisionToRoll,
+ PrepareRollCandidate,
+ SwitchChromium,
+ UpdateChromiumCheckout,
+ UploadCL,
+ CleanUp,
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(AutoRoll().Run())
diff --git a/tools/push-to-trunk/auto_tag.py b/tools/release/auto_tag.py
index a52a0286..a52a0286 100755
--- a/tools/push-to-trunk/auto_tag.py
+++ b/tools/release/auto_tag.py
diff --git a/tools/push-to-trunk/check_clusterfuzz.py b/tools/release/check_clusterfuzz.py
index d4ba90ba..fc826c1d 100755
--- a/tools/push-to-trunk/check_clusterfuzz.py
+++ b/tools/release/check_clusterfuzz.py
@@ -53,6 +53,15 @@ BUG_SPECS = [
},
{
"args": {
+ "job_type": "linux_asan_d8",
+ "reproducible": "True",
+ "open": "True",
+ "bug_information": "",
+ },
+ "crash_state": ANY_RE,
+ },
+ {
+ "args": {
"job_type": "linux_asan_d8_dbg",
"reproducible": "True",
"open": "True",
@@ -60,6 +69,33 @@ BUG_SPECS = [
},
"crash_state": ANY_RE,
},
+ {
+ "args": {
+ "job_type": "linux_asan_d8_v8_arm_dbg",
+ "reproducible": "True",
+ "open": "True",
+ "bug_information": "",
+ },
+ "crash_state": ANY_RE,
+ },
+ {
+ "args": {
+ "job_type": "linux_asan_d8_v8_arm64_dbg",
+ "reproducible": "True",
+ "open": "True",
+ "bug_information": "",
+ },
+ "crash_state": ANY_RE,
+ },
+ {
+ "args": {
+ "job_type": "linux_asan_d8_v8_mipsel_dbg",
+ "reproducible": "True",
+ "open": "True",
+ "bug_information": "",
+ },
+ "crash_state": ANY_RE,
+ },
]
diff --git a/tools/push-to-trunk/common_includes.py b/tools/release/common_includes.py
index ac78ef8d..c2b64c38 100644
--- a/tools/push-to-trunk/common_includes.py
+++ b/tools/release/common_includes.py
@@ -46,7 +46,10 @@ from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
CHANGELOG_FILE = "ChangeLog"
-VERSION_FILE = os.path.join("src", "version.cc")
+DAY_IN_SECONDS = 24 * 60 * 60
+PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
+PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
+VERSION_FILE = os.path.join("include", "v8-version.h")
# V8 base directory.
V8_BASE = os.path.dirname(
@@ -202,6 +205,30 @@ def Command(cmd, args="", prefix="", pipe=True, cwd=None):
sys.stderr.flush()
+def SanitizeVersionTag(tag):
+ version_without_prefix = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
+ version_with_prefix = re.compile(r"^tags\/\d+\.\d+\.\d+(?:\.\d+)?$")
+
+ if version_without_prefix.match(tag):
+ return tag
+ elif version_with_prefix.match(tag):
+ return tag[len("tags/"):]
+ else:
+ return None
+
+
+def NormalizeVersionTags(version_tags):
+ normalized_version_tags = []
+
+ # Remove tags/ prefix because of packed refs.
+ for current_tag in version_tags:
+ version_tag = SanitizeVersionTag(current_tag)
+ if version_tag is not None:
+ normalized_version_tags.append(version_tag)
+
+ return normalized_version_tags
+
+
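Expected behaviour of the two tag helpers above, inferred from their regexes (tag values illustrative):

    # SanitizeVersionTag("4.5.12")        -> "4.5.12"
    # SanitizeVersionTag("tags/4.5.12.1") -> "4.5.12.1"  (packed-refs prefix stripped)
    # SanitizeVersionTag("candidate")     -> None
    # NormalizeVersionTags(["4.5.12", "tags/4.5.13", "junk"]) -> ["4.5.12", "4.5.13"]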
# Wrapper for side effects.
class SideEffectHandler(object): # pragma: no cover
def Call(self, fun, *args, **kwargs):
@@ -329,9 +356,25 @@ class GitInterface(VCInterface):
return "origin/candidates"
def RemoteBranch(self, name):
+ # Assume that if someone "fully qualified" the ref, they know what they
+ # want.
+ if name.startswith('refs/'):
+ return name
if name in ["candidates", "master"]:
- return "origin/%s" % name
- return "branch-heads/%s" % name
+ return "refs/remotes/origin/%s" % name
+ try:
+ # Check if branch is in heads.
+ if self.step.Git("show-ref refs/remotes/origin/%s" % name).strip():
+ return "refs/remotes/origin/%s" % name
+ except GitFailedException:
+ pass
+ try:
+ # Check if branch is in branch-heads.
+ if self.step.Git("show-ref refs/remotes/branch-heads/%s" % name).strip():
+ return "refs/remotes/branch-heads/%s" % name
+ except GitFailedException:
+ pass
+ self.Die("Can't find remote of %s" % name)
def Tag(self, tag, remote, message):
# Wait for the commit to appear. Assumes unique commit message titles (this
@@ -380,7 +423,7 @@ class Step(GitRecipesMixin):
def __getitem__(self, key):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
- return self._state[key]
+ return self._state.get(key)
def __setitem__(self, key, value):
# Convenience method to allow direct [] access on step classes for
@@ -491,12 +534,12 @@ class Step(GitRecipesMixin):
answer = self.ReadLine(default="Y")
return answer == "" or answer == "Y" or answer == "y"
- def DeleteBranch(self, name):
- for line in self.GitBranch().splitlines():
+ def DeleteBranch(self, name, cwd=None):
+ for line in self.GitBranch(cwd=cwd).splitlines():
if re.match(r"\*?\s*%s$" % re.escape(name), line):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
- self.GitDeleteBranch(name)
+ self.GitDeleteBranch(name, cwd=cwd)
print "Branch %s deleted." % name
else:
msg = "Can't continue. Please delete branch %s and try again." % name
@@ -518,8 +561,8 @@ class Step(GitRecipesMixin):
if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Persist current branch.
- self["current_branch"] = self.GitCurrentBranch()
+ # Checkout master in case the script was left on a work branch.
+ self.GitCheckout('origin/master')
# Fetch unfetched revisions.
self.vc.Fetch()
@@ -529,12 +572,8 @@ class Step(GitRecipesMixin):
self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
- if ' ' in self["current_branch"]:
- self.GitCheckout('master')
- else:
- self.GitCheckout(self["current_branch"])
- if self._config["BRANCHNAME"] != self["current_branch"]:
- self.GitDeleteBranch(self._config["BRANCHNAME"])
+ self.GitCheckout('origin/master')
+ self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
@@ -550,10 +589,10 @@ class Step(GitRecipesMixin):
value = match.group(1)
self["%s%s" % (prefix, var_name)] = value
for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
- for (var_name, def_name) in [("major", "MAJOR_VERSION"),
- ("minor", "MINOR_VERSION"),
- ("build", "BUILD_NUMBER"),
- ("patch", "PATCH_LEVEL")]:
+ for (var_name, def_name) in [("major", "V8_MAJOR_VERSION"),
+ ("minor", "V8_MINOR_VERSION"),
+ ("build", "V8_BUILD_NUMBER"),
+ ("patch", "V8_PATCH_LEVEL")]:
ReadAndPersist(var_name, def_name)
def WaitForLGTM(self):
@@ -589,16 +628,80 @@ class Step(GitRecipesMixin):
except GitFailedException:
self.WaitForResolvingConflicts(patch_file)
- def FindLastTrunkPush(
- self, parent_hash="", branch="", include_patches=False):
- push_pattern = "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*"
- if not include_patches:
- # Non-patched versions only have three numbers followed by the "(based
- # on...) comment."
- push_pattern += " (based"
- branch = "" if parent_hash else branch or self.vc.RemoteCandidateBranch()
- return self.GitLog(n=1, format="%H", grep=push_pattern,
- parent_hash=parent_hash, branch=branch)
+ def GetVersionTag(self, revision):
+ tag = self.Git("describe --tags %s" % revision).strip()
+ return SanitizeVersionTag(tag)
+
+ def GetRecentReleases(self, max_age):
+ # Make sure tags are fetched.
+ self.Git("fetch origin +refs/tags/*:refs/tags/*")
+
+ # Current timestamp.
+ time_now = int(self._side_effect_handler.GetUTCStamp())
+
+ # List every tag from a given period.
+ revisions = self.Git("rev-list --max-age=%d --tags" %
+ int(time_now - max_age)).strip()
+
+ # Filter out revisions whose tag is off by one or more commits.
+ return filter(lambda r: self.GetVersionTag(r), revisions.splitlines())
+
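GetRecentReleases above boils down to one rev-list call plus the tag filter; a hedged standalone sketch (to be run inside a checkout with fetched tags):

    import subprocess
    import time

    def recent_tagged_revisions(max_age_seconds):
        cutoff = int(time.time()) - max_age_seconds
        # --max-age takes an epoch timestamp; --tags starts from all tag refs
        # (ancestors included, hence the extra version-tag filter above).
        out = subprocess.check_output(
            ["git", "rev-list", "--max-age=%d" % cutoff, "--tags"])
        return out.strip().splitlines()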
+ def GetLatestVersion(self):
+ # Use cached version if available.
+ if self["latest_version"]:
+ return self["latest_version"]
+
+ # Make sure tags are fetched.
+ self.Git("fetch origin +refs/tags/*:refs/tags/*")
+
+ all_tags = self.vc.GetTags()
+ only_version_tags = NormalizeVersionTags(all_tags)
+
+ version = sorted(only_version_tags,
+ key=SortingKey, reverse=True)[0]
+ self["latest_version"] = version
+ return version
+
+ def GetLatestRelease(self):
+ """The latest release is the git hash of the latest tagged version.
+
+ This revision should be rolled into chromium.
+ """
+ latest_version = self.GetLatestVersion()
+
+ # The latest release.
+ latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
+ assert latest_hash
+ return latest_hash
+
+ def GetLatestReleaseBase(self, version=None):
+ """The latest release base is the latest revision that is covered in the
+ last change log file. It doesn't include cherry-picked patches.
+ """
+ latest_version = version or self.GetLatestVersion()
+
+ # Strip patch level if it exists.
+ latest_version = ".".join(latest_version.split(".")[:3])
+
+ # The latest release base.
+ latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
+ assert latest_hash
+
+ title = self.GitLog(n=1, format="%s", git_hash=latest_hash)
+ match = PUSH_MSG_GIT_RE.match(title)
+ if match:
+ # Legacy: In the old process there's one level of indirection. The
+ # version is on the candidates branch and points to the real release
+ # base on master through the commit message.
+ return match.group("git_rev")
+ match = PUSH_MSG_NEW_RE.match(title)
+ if match:
+ # This is a new-style v8 version branched from master. The commit
+ # "latest_hash" is the version-file change. Its parent is the release
+ # base on master.
+ return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
+
+ self.Die("Unknown latest release: %s" % latest_hash)
def ArrayToVersion(self, prefix):
return ".".join([self[prefix + "major"],
@@ -606,25 +709,41 @@ class Step(GitRecipesMixin):
self[prefix + "build"],
self[prefix + "patch"]])
+ def StoreVersion(self, version, prefix):
+ version_parts = version.split(".")
+ if len(version_parts) == 3:
+ version_parts.append("0")
+ major, minor, build, patch = version_parts
+ self[prefix + "major"] = major
+ self[prefix + "minor"] = minor
+ self[prefix + "build"] = build
+ self[prefix + "patch"] = patch
+
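StoreVersion above pads three-component versions with a zero patch level, so both calls below persist the same four fields (prefix value illustrative):

    # StoreVersion("4.5.10", "new_")    -> new_major=4, new_minor=5, new_build=10, new_patch=0
    # StoreVersion("4.5.10.0", "new_")  -> same result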
def SetVersion(self, version_file, prefix):
output = ""
for line in FileToText(version_file).splitlines():
- if line.startswith("#define MAJOR_VERSION"):
+ if line.startswith("#define V8_MAJOR_VERSION"):
line = re.sub("\d+$", self[prefix + "major"], line)
- elif line.startswith("#define MINOR_VERSION"):
+ elif line.startswith("#define V8_MINOR_VERSION"):
line = re.sub("\d+$", self[prefix + "minor"], line)
- elif line.startswith("#define BUILD_NUMBER"):
+ elif line.startswith("#define V8_BUILD_NUMBER"):
line = re.sub("\d+$", self[prefix + "build"], line)
- elif line.startswith("#define PATCH_LEVEL"):
+ elif line.startswith("#define V8_PATCH_LEVEL"):
line = re.sub("\d+$", self[prefix + "patch"], line)
+ elif (self[prefix + "candidate"] and
+ line.startswith("#define V8_IS_CANDIDATE_VERSION")):
+ line = re.sub("\d+$", self[prefix + "candidate"], line)
output += "%s\n" % line
TextToFile(output, version_file)
class BootstrapStep(Step):
- MESSAGE = "Bootstapping v8 checkout."
+ MESSAGE = "Bootstrapping checkout and state."
def RunStep(self):
+ # Reserve state entry for json output.
+ self['json_output'] = {}
+
if os.path.realpath(self.default_cwd) == os.path.realpath(V8_BASE):
self.Die("Can't use v8 checkout with calling script as work checkout.")
# Directory containing the working v8 checkout.
@@ -650,40 +769,6 @@ class UploadStep(Step):
cc=self._options.cc)
-class DetermineV8Sheriff(Step):
- MESSAGE = "Determine the V8 sheriff for code review."
-
- def RunStep(self):
- self["sheriff"] = None
- if not self._options.sheriff: # pragma: no cover
- return
-
- try:
- # The googlers mapping maps @google.com accounts to @chromium.org
- # accounts.
- googlers = imp.load_source('googlers_mapping',
- self._options.googlers_mapping)
- googlers = googlers.list_to_dict(googlers.get_list())
- except: # pragma: no cover
- print "Skip determining sheriff without googler mapping."
- return
-
- # The sheriff determined by the rotation on the waterfall has a
- # @google.com account.
- url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js"
- match = re.match(r"document\.write\('(\w+)'\)", self.ReadURL(url))
-
- # If "channel is sheriff", we can't match an account.
- if match:
- g_name = match.group(1)
- self["sheriff"] = googlers.get(g_name + "@google.com",
- g_name + "@chromium.org")
- self._options.reviewer = self["sheriff"]
- print "Found active sheriff: %s" % self["sheriff"]
- else:
- print "No active sheriff found."
-
-
def MakeStep(step_class=Step, number=0, state=None, config=None,
options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
# Allow to pass in empty dictionaries.
@@ -730,14 +815,10 @@ class ScriptsBase(object):
help="The author email used for rietveld.")
parser.add_argument("--dry-run", default=False, action="store_true",
help="Perform only read-only actions.")
- parser.add_argument("-g", "--googlers-mapping",
- help="Path to the script mapping google accounts.")
+ parser.add_argument("--json-output",
+ help="File to write results summary to.")
parser.add_argument("-r", "--reviewer", default="",
help="The account name to be used for reviews.")
- parser.add_argument("--sheriff", default=False, action="store_true",
- help=("Determine current sheriff to review CLs. On "
- "success, this will overwrite the reviewer "
- "option."))
parser.add_argument("-s", "--step",
help="Specify the step where to start work. Default: 0.",
default=0, type=int)
@@ -756,10 +837,6 @@ class ScriptsBase(object):
print "Bad step number %d" % options.step
parser.print_help()
return None
- if options.sheriff and not options.googlers_mapping: # pragma: no cover
- print "To determine the current sheriff, requires the googler mapping"
- parser.print_help()
- return None
# Defaults for options, common to all scripts.
options.manual = getattr(options, "manual", True)
@@ -794,9 +871,16 @@ class ScriptsBase(object):
for (number, step_class) in enumerate([BootstrapStep] + step_classes):
steps.append(MakeStep(step_class, number, self._state, self._config,
options, self._side_effect_handler))
- for step in steps[options.step:]:
- if step.Run():
- return 0
+
+ try:
+ for step in steps[options.step:]:
+ if step.Run():
+ return 0
+ finally:
+ if options.json_output:
+ with open(options.json_output, "w") as f:
+ json.dump(self._state['json_output'], f)
+
return 0
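When --json-output is passed, the finally block above persists the state's 'json_output' dict regardless of which step bailed out; for auto_roll.py a successful run would write, e.g.:

    # {"monitoring_state": "success"}

(The value comes from the CleanUp step; the file name is whatever the caller passed.)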
def Run(self, args=None):
diff --git a/tools/release/create_release.py b/tools/release/create_release.py
new file mode 100755
index 00000000..3bbb50e4
--- /dev/null
+++ b/tools/release/create_release.py
@@ -0,0 +1,299 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import sys
+import tempfile
+import urllib2
+
+from common_includes import *
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ fetchspecs = [
+ "+refs/heads/*:refs/heads/*",
+ "+refs/pending/*:refs/pending/*",
+ "+refs/pending-tags/*:refs/pending-tags/*",
+ ]
+ self.Git("fetch origin %s" % " ".join(fetchspecs))
+ self.GitCheckout("origin/master")
+ self.DeleteBranch("work-branch")
+
+
+class PrepareBranchRevision(Step):
+ MESSAGE = "Check from which revision to branch off."
+
+ def RunStep(self):
+ self["push_hash"] = (self._options.revision or
+ self.GitLog(n=1, format="%H", branch="origin/master"))
+ assert self["push_hash"]
+ print "Release revision %s" % self["push_hash"]
+
+
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
+
+ def RunStep(self):
+ latest_version = self.GetLatestVersion()
+
+ # The version file on master can be used to bump up major/minor at
+ # branch time.
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
+ self.ReadAndPersistVersion("master_")
+ master_version = self.ArrayToVersion("master_")
+
+ # Use the highest version from master or from tags to determine the new
+ # version.
+ authoritative_version = sorted(
+ [master_version, latest_version], key=SortingKey)[1]
+ self.StoreVersion(authoritative_version, "authoritative_")
+
+ # Variables prefixed with 'new_' contain the new version numbers for the
+ # ongoing candidates push.
+ self["new_major"] = self["authoritative_major"]
+ self["new_minor"] = self["authoritative_minor"]
+ self["new_build"] = str(int(self["authoritative_build"]) + 1)
+
+ # Make sure patch level is 0 in a new push.
+ self["new_patch"] = "0"
+
+ # The new version is not a candidate.
+ self["new_candidate"] = "0"
+
+ self["version"] = "%s.%s.%s" % (self["new_major"],
+ self["new_minor"],
+ self["new_build"])
+
+ print ("Incremented version to %s" % self["version"])
+
+
+class DetectLastRelease(Step):
+ MESSAGE = "Detect commit ID of last release base."
+
+ def RunStep(self):
+ self["last_push_master"] = self.GetLatestReleaseBase()
+
+
+class PrepareChangeLog(Step):
+ MESSAGE = "Prepare raw ChangeLog entry."
+
+ def Reload(self, body):
+ """Attempts to reload the commit message from rietveld in order to allow
+ late changes to the LOG flag. Note: This is brittle to future changes of
+ the web page name or structure.
+ """
+ match = re.search(r"^Review URL: https://codereview\.chromium\.org/(\d+)$",
+ body, flags=re.M)
+ if match:
+ cl_url = ("https://codereview.chromium.org/%s/description"
+ % match.group(1))
+ try:
+ # Fetch from Rietveld but only retry once with one second delay since
+ # there might be many revisions.
+ body = self.ReadURL(cl_url, wait_plan=[1])
+ except urllib2.URLError: # pragma: no cover
+ pass
+ return body
+
+ def RunStep(self):
+ self["date"] = self.GetDate()
+ output = "%s: Version %s\n\n" % (self["date"], self["version"])
+ TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE"))
+ commits = self.GitLog(format="%H",
+ git_hash="%s..%s" % (self["last_push_master"],
+ self["push_hash"]))
+
+ # Cache raw commit messages.
+ commit_messages = [
+ [
+ self.GitLog(n=1, format="%s", git_hash=commit),
+ self.Reload(self.GitLog(n=1, format="%B", git_hash=commit)),
+ self.GitLog(n=1, format="%an", git_hash=commit),
+ ] for commit in commits.splitlines()
+ ]
+
+ # Auto-format commit messages.
+ body = MakeChangeLogBody(commit_messages, auto_format=True)
+ AppendToFile(body, self.Config("CHANGELOG_ENTRY_FILE"))
+
+ msg = (" Performance and stability improvements on all platforms."
+ "\n#\n# The change log above is auto-generated. Please review if "
+ "all relevant\n# commit messages from the list below are included."
+ "\n# All lines starting with # will be stripped.\n#\n")
+ AppendToFile(msg, self.Config("CHANGELOG_ENTRY_FILE"))
+
+ # Include unformatted commit messages as a reference in a comment.
+ comment_body = MakeComment(MakeChangeLogBody(commit_messages))
+ AppendToFile(comment_body, self.Config("CHANGELOG_ENTRY_FILE"))
+
+
+class EditChangeLog(Step):
+ MESSAGE = "Edit ChangeLog entry."
+
+ def RunStep(self):
+ print ("Please press <Return> to have your EDITOR open the ChangeLog "
+ "entry, then edit its contents to your liking. When you're done, "
+ "save the file and exit your EDITOR. ")
+ self.ReadLine(default="")
+ self.Editor(self.Config("CHANGELOG_ENTRY_FILE"))
+
+ # Strip comments and reformat with correct indentation.
+ changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE")).rstrip()
+ changelog_entry = StripComments(changelog_entry)
+ changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
+ changelog_entry = changelog_entry.lstrip()
+
+ if changelog_entry == "": # pragma: no cover
+ self.Die("Empty ChangeLog entry.")
+
+ # Save the new change log for adding it later to the candidates patch.
+ TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
+
+
+class MakeBranch(Step):
+ MESSAGE = "Create the branch."
+
+ def RunStep(self):
+ self.Git("reset --hard origin/master")
+ self.Git("checkout -b work-branch %s" % self["push_hash"])
+ self.GitCheckoutFile(CHANGELOG_FILE, self["latest_version"])
+ self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
+
+
+class AddChangeLog(Step):
+ MESSAGE = "Add ChangeLog changes to release branch."
+
+ def RunStep(self):
+ changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
+ old_change_log = FileToText(os.path.join(self.default_cwd, CHANGELOG_FILE))
+ new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
+ TextToFile(new_change_log, os.path.join(self.default_cwd, CHANGELOG_FILE))
+
+
+class SetVersion(Step):
+ MESSAGE = "Set correct version for candidates."
+
+ def RunStep(self):
+ self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
+
+
+class CommitBranch(Step):
+ MESSAGE = "Commit version and changelog to new branch."
+
+ def RunStep(self):
+ # Convert the ChangeLog entry to commit message format.
+ text = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
+
+ # Remove date and trailing white space.
+ text = re.sub(r"^%s: " % self["date"], "", text.rstrip())
+
+ # Remove indentation and merge paragraphs into single long lines, keeping
+ # empty lines between them.
+ def SplitMapJoin(split_text, fun, join_text):
+ return lambda text: join_text.join(map(fun, text.split(split_text)))
+ text = SplitMapJoin(
+ "\n\n", SplitMapJoin("\n", str.strip, " "), "\n\n")(text)
+
+ if not text: # pragma: no cover
+ self.Die("Commit message editing failed.")
+ self["commit_title"] = text.splitlines()[0]
+ TextToFile(text, self.Config("COMMITMSG_FILE"))
+
+ self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
+ os.remove(self.Config("COMMITMSG_FILE"))
+ os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
+
+
+class PushBranch(Step):
+ MESSAGE = "Push changes."
+
+ def RunStep(self):
+ pushspecs = [
+ "refs/heads/work-branch:refs/pending/heads/%s" % self["version"],
+ "%s:refs/pending-tags/heads/%s" % (self["push_hash"], self["version"]),
+ "%s:refs/heads/%s" % (self["push_hash"], self["version"]),
+ ]
+ cmd = "push origin %s" % " ".join(pushspecs)
+ if self._options.dry_run:
+ print "Dry run. Command:\ngit %s" % cmd
+ else:
+ self.Git(cmd)
+
+
+class TagRevision(Step):
+ MESSAGE = "Tag the new revision."
+
+ def RunStep(self):
+ if self._options.dry_run:
+ print ("Dry run. Tagging \"%s\" with %s" %
+ (self["commit_title"], self["version"]))
+ else:
+ self.vc.Tag(self["version"],
+ "origin/%s" % self["version"],
+ self["commit_title"])
+
+
+class CleanUp(Step):
+ MESSAGE = "Done!"
+
+ def RunStep(self):
+ print("Congratulations, you have successfully created version %s."
+ % self["version"])
+
+ self.GitCheckout("origin/master")
+ self.DeleteBranch("work-branch")
+ self.Git("gc")
+
+
+class CreateRelease(ScriptsBase):
+ def _PrepareOptions(self, parser):
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("-f", "--force",
+ help="Don't prompt the user.",
+ default=True, action="store_true")
+ group.add_argument("-m", "--manual",
+ help="Prompt the user at every important step.",
+ default=False, action="store_true")
+ parser.add_argument("-R", "--revision",
+ help="The git commit ID to push (defaults to HEAD).")
+
+ def _ProcessOptions(self, options): # pragma: no cover
+ if not options.author or not options.reviewer:
+ print "Reviewer (-r) and author (-a) are required."
+ return False
+ return True
+
+ def _Config(self):
+ return {
+ "PERSISTFILE_BASENAME": "/tmp/create-releases-tempfile",
+ "CHANGELOG_ENTRY_FILE":
+ "/tmp/v8-create-releases-tempfile-changelog-entry",
+ "COMMITMSG_FILE": "/tmp/v8-create-releases-tempfile-commitmsg",
+ }
+
+ def _Steps(self):
+ return [
+ Preparation,
+ PrepareBranchRevision,
+ IncrementVersion,
+ DetectLastRelease,
+ PrepareChangeLog,
+ EditChangeLog,
+ MakeBranch,
+ AddChangeLog,
+ SetVersion,
+ CommitBranch,
+ PushBranch,
+ TagRevision,
+ CleanUp,
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(CreateRelease().Run())
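
The SplitMapJoin helper in CommitBranch above flattens each ChangeLog paragraph into a single line while keeping blank lines between paragraphs. A worked example (the sample text is hypothetical):

    def SplitMapJoin(split_text, fun, join_text):
      return lambda text: join_text.join(map(fun, text.split(split_text)))

    entry = "  Performance and stability\n  improvements.\n\n  Fixed a bug."
    print SplitMapJoin("\n\n", SplitMapJoin("\n", str.strip, " "), "\n\n")(entry)
    # Performance and stability improvements.
    #
    # Fixed a bug.
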
diff --git a/tools/push-to-trunk/git_recipes.py b/tools/release/git_recipes.py
index 3d2a9ef8..89fd7c9c 100644
--- a/tools/push-to-trunk/git_recipes.py
+++ b/tools/release/git_recipes.py
@@ -163,7 +163,7 @@ class GitRecipesMixin(object):
@Strip
def GitLog(self, n=0, format="", grep="", git_hash="", parent_hash="",
- branch="", reverse=False, **kwargs):
+ branch="", path=None, reverse=False, **kwargs):
assert not (git_hash and parent_hash)
args = ["log"]
if n > 0:
@@ -179,8 +179,15 @@ class GitRecipesMixin(object):
if parent_hash:
args.append("%s^" % parent_hash)
args.append(branch)
+ if path:
+ args.extend(["--", path])
return self.Git(MakeArgs(args), **kwargs)
+ def GitShowFile(self, refspec, path, **kwargs):
+ assert refspec
+ assert path
+ return self.Git(MakeArgs(["show", "%s:%s" % (refspec, path)]), **kwargs)
+
def GitGetPatch(self, git_hash, **kwargs):
assert git_hash
return self.Git(MakeArgs(["log", "-1", "-p", git_hash]), **kwargs)
@@ -241,8 +248,8 @@ class GitRecipesMixin(object):
def GitPull(self, **kwargs):
self.Git("pull", **kwargs)
- def GitFetchOrigin(self, **kwargs):
- self.Git("fetch origin", **kwargs)
+ def GitFetchOrigin(self, *refspecs, **kwargs):
+ self.Git(MakeArgs(["fetch", "origin"] + list(refspecs)), **kwargs)
@Strip
# Copied from bot_update.py and modified for svn-like numbers only.
@@ -273,3 +280,6 @@ class GitRecipesMixin(object):
return match.group(1)
raise GitFailedException("Couldn't determine commit position for %s" %
git_hash)
+
+ def GitGetHashOfTag(self, tag_name, **kwargs):
+ return self.Git("rev-list -1 " + tag_name).strip().encode("ascii", "ignore")
diff --git a/tools/push-to-trunk/merge_to_branch.py b/tools/release/merge_to_branch.py
index 9e7f1fb4..699fe1b3 100755
--- a/tools/push-to-trunk/merge_to_branch.py
+++ b/tools/release/merge_to_branch.py
@@ -47,10 +47,7 @@ class Preparation(Step):
open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
self.InitialEnvironmentChecks(self.default_cwd)
- if self._options.revert_bleeding_edge:
- # FIXME(machenbach): Make revert bleeding_edge obsolete?
- self["merge_to_branch"] = "bleeding_edge"
- elif self._options.branch:
+ if self._options.branch:
self["merge_to_branch"] = self._options.branch
else: # pragma: no cover
self.Die("Please specify a branch to merge to")
@@ -104,17 +101,13 @@ class CreateCommitMessage(Step):
def RunStep(self):
- # Stringify: [123, 234] -> "r123, r234"
- self["revision_list"] = ", ".join(map(lambda s: "r%s" % s,
- self["full_revision_list"]))
+ # Stringify: ["abcde", "12345"] -> "abcde, 12345"
+ self["revision_list"] = ", ".join(self["full_revision_list"])
if not self["revision_list"]: # pragma: no cover
self.Die("Revision list is empty.")
- if self._options.revert and not self._options.revert_bleeding_edge:
- action_text = "Rollback of %s"
- else:
- action_text = "Merged %s"
+ action_text = "Merged %s"
# The commit message title is added below after the version is specified.
msg_pieces = [
@@ -147,17 +140,15 @@ class ApplyPatches(Step):
% (commit_hash, self["merge_to_branch"]))
patch = self.GitGetPatch(commit_hash)
TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
- self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"), self._options.revert)
+ self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
if self._options.patch:
- self.ApplyPatch(self._options.patch, self._options.revert)
+ self.ApplyPatch(self._options.patch)
class PrepareVersion(Step):
MESSAGE = "Prepare version file."
def RunStep(self):
- if self._options.revert_bleeding_edge:
- return
# This is used to calculate the patch level increment.
self.ReadAndPersistVersion()
@@ -166,15 +157,13 @@ class IncrementVersion(Step):
MESSAGE = "Increment version number."
def RunStep(self):
- if self._options.revert_bleeding_edge:
- return
new_patch = str(int(self["patch"]) + 1)
- if self.Confirm("Automatically increment PATCH_LEVEL? (Saying 'n' will "
+ if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
"fire up your EDITOR on %s so you can make arbitrary "
"changes. When you're done, save the file and exit your "
"EDITOR.)" % VERSION_FILE):
text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
- text = MSub(r"(?<=#define PATCH_LEVEL)(?P<space>\s+)\d*$",
+ text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
r"\g<space>%s" % new_patch,
text)
TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
@@ -192,12 +181,7 @@ class CommitLocal(Step):
def RunStep(self):
# Add a commit message title.
- if self._options.revert and self._options.revert_bleeding_edge:
- # TODO(machenbach): Find a better convention if multiple patches are
- # reverted in one CL.
- self["commit_title"] = "Revert on master"
- else:
- self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
+ self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
self["new_commit_msg"])
TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
@@ -218,8 +202,6 @@ class TagRevision(Step):
MESSAGE = "Create the tag."
def RunStep(self):
- if self._options.revert_bleeding_edge:
- return
print "Creating tag %s" % self["version"]
self.vc.Tag(self["version"],
self.vc.RemoteBranch(self["merge_to_branch"]),
@@ -231,25 +213,21 @@ class CleanUp(Step):
def RunStep(self):
self.CommonCleanup()
- if not self._options.revert_bleeding_edge:
- print "*** SUMMARY ***"
- print "version: %s" % self["version"]
- print "branch: %s" % self["merge_to_branch"]
- if self["revision_list"]:
- print "patches: %s" % self["revision_list"]
+ print "*** SUMMARY ***"
+ print "version: %s" % self["version"]
+ print "branch: %s" % self["merge_to_branch"]
+ if self["revision_list"]:
+ print "patches: %s" % self["revision_list"]
class MergeToBranch(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
- "bleeding_edge to other branches, including trunk.")
+ "master to other branches, including candidates.")
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--branch", help="The branch to merge to.")
- group.add_argument("-R", "--revert-bleeding-edge",
- help="Revert specified patches from bleeding edge.",
- default=False, action="store_true")
parser.add_argument("revisions", nargs="*",
help="The revisions to merge.")
parser.add_argument("-f", "--force",
@@ -257,14 +235,10 @@ class MergeToBranch(ScriptsBase):
default=False, action="store_true")
parser.add_argument("-m", "--message",
help="A commit message for the patch.")
- parser.add_argument("--revert",
- help="Revert specified patches.",
- default=False, action="store_true")
parser.add_argument("-p", "--patch",
help="A patch file to apply as part of the merge.")
def _ProcessOptions(self, options):
- # TODO(machenbach): Add a test that covers revert from bleeding_edge
if len(options.revisions) < 1:
if not options.patch:
print "Either a patch file or revision numbers must be specified"
diff --git a/tools/release/mergeinfo.py b/tools/release/mergeinfo.py
new file mode 100755
index 00000000..7f8b9cba
--- /dev/null
+++ b/tools/release/mergeinfo.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import sys
+
+from search_related_commits import git_execute
+
+GIT_OPTION_HASH_ONLY = '--pretty=format:%H'
+GIT_OPTION_NO_DIFF = '--quiet'
+GIT_OPTION_ONELINE = '--oneline'
+
+def describe_commit(git_working_dir, hash_to_search, one_line=False):
+ if one_line:
+ return git_execute(git_working_dir, ['show',
+ GIT_OPTION_NO_DIFF,
+ GIT_OPTION_ONELINE,
+ hash_to_search]).strip()
+ return git_execute(git_working_dir, ['show',
+ GIT_OPTION_NO_DIFF,
+ hash_to_search]).strip()
+
+
+def get_followup_commits(git_working_dir, hash_to_search):
+ return git_execute(git_working_dir, ['log',
+ '--grep=' + hash_to_search,
+ GIT_OPTION_HASH_ONLY,
+ 'master']).strip().splitlines()
+
+def get_merge_commits(git_working_dir, hash_to_search):
+ merges = get_related_commits_not_on_master(git_working_dir, hash_to_search)
+ false_merges = get_related_commits_not_on_master(
+ git_working_dir, 'Cr-Branched-From: ' + hash_to_search)
+ false_merges = set(false_merges)
+ return ([merge_commit for merge_commit in merges
+ if merge_commit not in false_merges])
+
+def get_related_commits_not_on_master(git_working_dir, grep_command):
+ commits = git_execute(git_working_dir, ['log',
+ '--all',
+ '--grep=' + grep_command,
+ GIT_OPTION_ONELINE,
+ '--decorate',
+ '--not',
+ 'master',
+ GIT_OPTION_HASH_ONLY])
+ return commits.splitlines()
+
+def get_branches_for_commit(git_working_dir, hash_to_search):
+ branches = git_execute(git_working_dir, ['branch',
+ '--contains',
+ hash_to_search,
+ '-a']).strip()
+ branches = branches.splitlines()
+ return map(str.strip, branches)
+
+def is_lkgr(git_working_dir, hash_to_search):
+ branches = get_branches_for_commit(git_working_dir, hash_to_search)
+ return 'remotes/origin/lkgr' in branches
+
+def get_first_canary(git_working_dir, hash_to_search):
+ branches = get_branches_for_commit(git_working_dir, hash_to_search)
+ canaries = ([currentBranch for currentBranch in branches if
+ currentBranch.startswith('remotes/origin/chromium/')])
+ canaries.sort()
+ if len(canaries) == 0:
+ return 'No Canary coverage'
+ return canaries[0].split('/')[-1]
+
+def print_analysis(git_working_dir, hash_to_search):
+ print '1.) Searching for "' + hash_to_search + '"'
+ print '=====================ORIGINAL COMMIT START==================='
+ print describe_commit(git_working_dir, hash_to_search)
+ print '=====================ORIGINAL COMMIT END====================='
+ print '2.) General information:'
+ print 'Is LKGR: ' + str(is_lkgr(git_working_dir, hash_to_search))
+ print 'Is on Canary: ' + (
+ str(get_first_canary(git_working_dir, hash_to_search)))
+ print '3.) Found follow-up commits, reverts and ports:'
+ followups = get_followup_commits(git_working_dir, hash_to_search)
+ for followup in followups:
+ print describe_commit(git_working_dir, followup, True)
+
+ print '4.) Found merges:'
+ merges = get_merge_commits(git_working_dir, hash_to_search)
+ for currentMerge in merges:
+ print describe_commit(git_working_dir, currentMerge, True)
+ print '---Merged to:'
+ mergeOutput = git_execute(git_working_dir, ['branch',
+ '--contains',
+ currentMerge,
+ '-r']).strip()
+ print mergeOutput
+ print 'Finished successfully'
+
+if __name__ == '__main__': # pragma: no cover
+ parser = argparse.ArgumentParser(description='Tool to check where a git '
+ 'commit was merged and reverted.')
+
+ parser.add_argument('-g', '--git-dir', required=False, default='.',
+ help='The path to your git working directory.')
+
+ parser.add_argument('hash',
+ nargs=1,
+ help='Hash of the commit to be searched.')
+
+ args = sys.argv[1:]
+ options = parser.parse_args(args)
+
+ sys.exit(print_analysis(options.git_dir, options.hash[0]))
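
Note that get_merge_commits above filters out "false merges": commits that mention the hash only via a Cr-Branched-From footer. Driving the module from Python might look like this (checkout path and hash are hypothetical):

    import mergeinfo

    V8_DIR = "/path/to/v8"  # hypothetical checkout
    merges = mergeinfo.get_merge_commits(V8_DIR, "abc1234")
    for m in merges:
      print mergeinfo.describe_commit(V8_DIR, m, one_line=True)
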
diff --git a/tools/push-to-trunk/push_to_trunk.py b/tools/release/push_to_candidates.py
index 6e821f2a..750794ea 100755
--- a/tools/push-to-trunk/push_to_trunk.py
+++ b/tools/release/push_to_candidates.py
@@ -35,7 +35,7 @@ import urllib2
from common_includes import *
PUSH_MSG_GIT_SUFFIX = " (based on %s)"
-PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
+
class Preparation(Step):
MESSAGE = "Preparation."
@@ -44,12 +44,12 @@ class Preparation(Step):
self.InitialEnvironmentChecks(self.default_cwd)
self.CommonPrepare()
- if(self["current_branch"] == self.Config("TRUNKBRANCH")
+ if(self["current_branch"] == self.Config("CANDIDATESBRANCH")
or self["current_branch"] == self.Config("BRANCHNAME")):
print "Warning: Script started on branch %s" % self["current_branch"]
self.PrepareBranch()
- self.DeleteBranch(self.Config("TRUNKBRANCH"))
+ self.DeleteBranch(self.Config("CANDIDATESBRANCH"))
class FreshBranch(Step):
@@ -72,92 +72,29 @@ class PreparePushRevision(Step):
self.Die("Could not determine the git hash for the push.")
-class DetectLastPush(Step):
- MESSAGE = "Detect commit ID of last push to trunk."
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
def RunStep(self):
- last_push = self._options.last_push or self.FindLastTrunkPush()
- while True:
- # Print assumed commit, circumventing git's pager.
- print self.GitLog(n=1, git_hash=last_push)
- if self.Confirm("Is the commit printed above the last push to trunk?"):
- break
- last_push = self.FindLastTrunkPush(parent_hash=last_push)
-
- if self._options.last_bleeding_edge:
- # Read the bleeding edge revision of the last push from a command-line
- # option.
- last_push_bleeding_edge = self._options.last_bleeding_edge
- else:
- # Retrieve the bleeding edge revision of the last push from the text in
- # the push commit message.
- last_push_title = self.GitLog(n=1, format="%s", git_hash=last_push)
- last_push_bleeding_edge = PUSH_MSG_GIT_RE.match(
- last_push_title).group("git_rev")
-
- if not last_push_bleeding_edge: # pragma: no cover
- self.Die("Could not retrieve bleeding edge git hash for trunk push %s"
- % last_push)
-
- # This points to the git hash of the last push on trunk.
- self["last_push_trunk"] = last_push
- # This points to the last bleeding_edge revision that went into the last
- # push.
- # TODO(machenbach): Do we need a check to make sure we're not pushing a
- # revision older than the last push? If we do this, the output of the
- # current change log preparation won't make much sense.
- self["last_push_bleeding_edge"] = last_push_bleeding_edge
-
-
-# TODO(machenbach): Code similarities with bump_up_version.py. Merge after
-# turning this script into a pure git script.
-class GetCurrentBleedingEdgeVersion(Step):
- MESSAGE = "Get latest bleeding edge version."
+ latest_version = self.GetLatestVersion()
- def RunStep(self):
+ # The version file on master can be used to bump up major/minor at
+ # branch time.
self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
+ self.ReadAndPersistVersion("master_")
+ master_version = self.ArrayToVersion("master_")
- # Store latest version.
- self.ReadAndPersistVersion("latest_")
- self["latest_version"] = self.ArrayToVersion("latest_")
- print "Bleeding edge version: %s" % self["latest_version"]
-
-
-class IncrementVersion(Step):
- MESSAGE = "Increment version number."
-
- def RunStep(self):
- # Retrieve current version from last trunk push.
- self.GitCheckoutFile(VERSION_FILE, self["last_push_trunk"])
- self.ReadAndPersistVersion()
- self["trunk_version"] = self.ArrayToVersion("")
-
- if self["latest_build"] == "9999": # pragma: no cover
- # If version control on bleeding edge was switched off, just use the last
- # trunk version.
- self["latest_version"] = self["trunk_version"]
-
- if SortingKey(self["trunk_version"]) < SortingKey(self["latest_version"]):
- # If the version on bleeding_edge is newer than on trunk, use it.
- self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
- self.ReadAndPersistVersion()
-
- if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
- "fire up your EDITOR on %s so you can make arbitrary "
- "changes. When you're done, save the file and exit your "
- "EDITOR.)" % VERSION_FILE)):
-
- text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
- text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
- r"\g<space>%s" % str(int(self["build"]) + 1),
- text)
- TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
- else:
- self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
+ # Use the highest version from master or from tags to determine the new
+ # version.
+ authoritative_version = sorted(
+ [master_version, latest_version], key=SortingKey)[1]
+ self.StoreVersion(authoritative_version, "authoritative_")
# Variables prefixed with 'new_' contain the new version numbers for the
- # ongoing trunk push.
- self.ReadAndPersistVersion("new_")
+ # ongoing candidates push.
+ self["new_major"] = self["authoritative_major"]
+ self["new_minor"] = self["authoritative_minor"]
+ self["new_build"] = str(int(self["authoritative_build"]) + 1)
# Make sure patch level is 0 in a new push.
self["new_patch"] = "0"
@@ -166,6 +103,18 @@ class IncrementVersion(Step):
self["new_minor"],
self["new_build"])
+ print ("Incremented version to %s" % self["version"])
+
+
+class DetectLastRelease(Step):
+ MESSAGE = "Detect commit ID of last release base."
+
+ def RunStep(self):
+ if self._options.last_master:
+ self["last_push_master"] = self._options.last_master
+ else:
+ self["last_push_master"] = self.GetLatestReleaseBase()
+
class PrepareChangeLog(Step):
MESSAGE = "Prepare raw ChangeLog entry."
@@ -193,7 +142,7 @@ class PrepareChangeLog(Step):
output = "%s: Version %s\n\n" % (self["date"], self["version"])
TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE"))
commits = self.GitLog(format="%H",
- git_hash="%s..%s" % (self["last_push_bleeding_edge"],
+ git_hash="%s..%s" % (self["last_push_master"],
self["push_hash"]))
# Cache raw commit messages.
@@ -239,7 +188,7 @@ class EditChangeLog(Step):
if changelog_entry == "": # pragma: no cover
self.Die("Empty ChangeLog entry.")
- # Safe new change log for adding it later to the trunk patch.
+ # Save the new change log for adding it later to the candidates patch.
TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
@@ -286,10 +235,10 @@ class SquashCommits(Step):
class NewBranch(Step):
- MESSAGE = "Create a new branch from trunk."
+ MESSAGE = "Create a new branch from candidates."
def RunStep(self):
- self.GitCreateBranch(self.Config("TRUNKBRANCH"),
+ self.GitCreateBranch(self.Config("CANDIDATESBRANCH"),
self.vc.RemoteCandidateBranch())
@@ -299,16 +248,41 @@ class ApplyChanges(Step):
def RunStep(self):
self.ApplyPatch(self.Config("PATCH_FILE"))
os.remove(self.Config("PATCH_FILE"))
+ # The change log has been modified by the patch. Reset it to the version
+ # on candidates and apply the exact changes determined by this
+ # PrepareChangeLog step above.
+ self.GitCheckoutFile(CHANGELOG_FILE, self.vc.RemoteCandidateBranch())
+ # The version file has been modified by the patch. Reset it to the version
+ # on candidates.
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteCandidateBranch())
+
+
+class CommitSquash(Step):
+ MESSAGE = "Commit to local candidates branch."
+
+ def RunStep(self):
+ # Make a first commit with a slightly different title to not confuse
+ # the tagging.
+ msg = FileToText(self.Config("COMMITMSG_FILE")).splitlines()
+ msg[0] = msg[0].replace("(based on", "(squashed - based on")
+ self.GitCommit(message = "\n".join(msg))
+
+
+class PrepareVersionBranch(Step):
+ MESSAGE = "Prepare new branch to commit version and changelog file."
+
+ def RunStep(self):
+ self.GitCheckout("master")
+ self.Git("fetch")
+ self.GitDeleteBranch(self.Config("CANDIDATESBRANCH"))
+ self.GitCreateBranch(self.Config("CANDIDATESBRANCH"),
+ self.vc.RemoteCandidateBranch())
class AddChangeLog(Step):
- MESSAGE = "Add ChangeLog changes to trunk branch."
+ MESSAGE = "Add ChangeLog changes to candidates branch."
def RunStep(self):
- # The change log has been modified by the patch. Reset it to the version
- # on trunk and apply the exact changes determined by this PrepareChangeLog
- # step above.
- self.GitCheckoutFile(CHANGELOG_FILE, self.vc.RemoteCandidateBranch())
changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
old_change_log = FileToText(os.path.join(self.default_cwd, CHANGELOG_FILE))
new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
@@ -317,17 +291,14 @@ class AddChangeLog(Step):
class SetVersion(Step):
- MESSAGE = "Set correct version for trunk."
+ MESSAGE = "Set correct version for candidates."
def RunStep(self):
- # The version file has been modified by the patch. Reset it to the version
- # on trunk and apply the correct version.
- self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteCandidateBranch())
self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
-class CommitTrunk(Step):
- MESSAGE = "Commit to local trunk branch."
+class CommitCandidate(Step):
+ MESSAGE = "Commit version and changelog to local candidates branch."
def RunStep(self):
self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
@@ -341,7 +312,7 @@ class SanityCheck(Step):
# TODO(machenbach): Run presubmit script here as it is now missing in the
# prepare push process.
if not self.Confirm("Please check if your local checkout is sane: Inspect "
- "%s, compile, run tests. Do you want to commit this new trunk "
+ "%s, compile, run tests. Do you want to commit this new candidates "
"revision to the repository?" % VERSION_FILE):
self.Die("Execution canceled.") # pragma: no cover
@@ -365,16 +336,16 @@ class CleanUp(Step):
MESSAGE = "Done!"
def RunStep(self):
- print("Congratulations, you have successfully created the trunk "
+ print("Congratulations, you have successfully created the candidates "
"revision %s."
% self["version"])
self.CommonCleanup()
- if self.Config("TRUNKBRANCH") != self["current_branch"]:
- self.GitDeleteBranch(self.Config("TRUNKBRANCH"))
+ if self.Config("CANDIDATESBRANCH") != self["current_branch"]:
+ self.GitDeleteBranch(self.Config("CANDIDATESBRANCH"))
-class PushToTrunk(ScriptsBase):
+class PushToCandidates(ScriptsBase):
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group()
group.add_argument("-f", "--force",
@@ -383,12 +354,12 @@ class PushToTrunk(ScriptsBase):
group.add_argument("-m", "--manual",
help="Prompt the user at every important step.",
default=False, action="store_true")
- parser.add_argument("-b", "--last-bleeding-edge",
- help=("The git commit ID of the last bleeding edge "
- "revision that was pushed to trunk. This is "
- "used for the auto-generated ChangeLog entry."))
+ parser.add_argument("-b", "--last-master",
+ help=("The git commit ID of the last master "
+ "revision that was pushed to candidates. This is"
+ " used for the auto-generated ChangeLog entry."))
parser.add_argument("-l", "--last-push",
- help="The git commit ID of the last push to trunk.")
+ help="The git commit ID of the last candidates push.")
parser.add_argument("-R", "--revision",
help="The git commit ID to push (defaults to HEAD).")
@@ -406,11 +377,12 @@ class PushToTrunk(ScriptsBase):
def _Config(self):
return {
"BRANCHNAME": "prepare-push",
- "TRUNKBRANCH": "trunk-push",
- "PERSISTFILE_BASENAME": "/tmp/v8-push-to-trunk-tempfile",
- "CHANGELOG_ENTRY_FILE": "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
- "PATCH_FILE": "/tmp/v8-push-to-trunk-tempfile-patch-file",
- "COMMITMSG_FILE": "/tmp/v8-push-to-trunk-tempfile-commitmsg",
+ "CANDIDATESBRANCH": "candidates-push",
+ "PERSISTFILE_BASENAME": "/tmp/v8-push-to-candidates-tempfile",
+ "CHANGELOG_ENTRY_FILE":
+ "/tmp/v8-push-to-candidates-tempfile-changelog-entry",
+ "PATCH_FILE": "/tmp/v8-push-to-candidates-tempfile-patch-file",
+ "COMMITMSG_FILE": "/tmp/v8-push-to-candidates-tempfile-commitmsg",
}
def _Steps(self):
@@ -418,19 +390,21 @@ class PushToTrunk(ScriptsBase):
Preparation,
FreshBranch,
PreparePushRevision,
- DetectLastPush,
- GetCurrentBleedingEdgeVersion,
IncrementVersion,
+ DetectLastRelease,
PrepareChangeLog,
EditChangeLog,
StragglerCommits,
SquashCommits,
NewBranch,
ApplyChanges,
+ CommitSquash,
+ SanityCheck,
+ Land,
+ PrepareVersionBranch,
AddChangeLog,
SetVersion,
- CommitTrunk,
- SanityCheck,
+ CommitCandidate,
Land,
TagRevision,
CleanUp,
@@ -438,4 +412,4 @@ class PushToTrunk(ScriptsBase):
if __name__ == "__main__": # pragma: no cover
- sys.exit(PushToTrunk().Run())
+ sys.exit(PushToCandidates().Run())
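
IncrementVersion above now derives the new version from the higher of the master version file and the newest tag, then bumps the build number. A sketch with a simplified stand-in for SortingKey (the real one lives in common_includes.py):

    def SortingKey(version):
      # Simplified stand-in: order dotted version strings numerically.
      return tuple(int(p) for p in version.split("."))

    master_version, latest_version = "4.2.70", "4.3.1"
    authoritative = sorted([master_version, latest_version], key=SortingKey)[1]
    major, minor, build = authoritative.split(".")[:3]
    print "%s.%s.%d" % (major, minor, int(build) + 1)  # -> 4.3.2
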
diff --git a/tools/push-to-trunk/releases.py b/tools/release/releases.py
index 1a5b15ca..7b659ccb 100755
--- a/tools/push-to-trunk/releases.py
+++ b/tools/release/releases.py
@@ -3,7 +3,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# This script retrieves the history of all V8 branches and trunk revisions and
+# This script retrieves the history of all V8 branches and
# their corresponding Chromium revisions.
# Requires a chromium checkout with branch heads:
@@ -60,6 +60,7 @@ DEPS_RE = re.compile(r"""^\s*(?:["']v8_revision["']: ["']"""
BLEEDING_EDGE_TAGS_RE = re.compile(
r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)")
+OMAHA_PROXY_URL = "http://omahaproxy.appspot.com/"
def SortBranches(branches):
"""Sort branches with version number names."""
@@ -70,7 +71,7 @@ def FilterDuplicatesAndReverse(cr_releases):
"""Returns the chromium releases in reverse order filtered by v8 revision
duplicates.
- cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
+ cr_releases is a list of [cr_rev, v8_hsh] reverse-sorted by cr_rev.
"""
last = ""
result = []
@@ -87,8 +88,9 @@ def BuildRevisionRanges(cr_releases):
The ranges are comma-separated, each range has the form R1:R2. The newest
entry is the only one of the form R1, as there is no end range.
- cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
- cr_rev either refers to a chromium svn revision or a chromium branch number.
+ cr_releases is a list of [cr_rev, v8_hsh] reverse-sorted by cr_rev.
+ cr_rev either refers to a chromium commit position or a chromium branch
+ number.
"""
range_lists = {}
cr_releases = FilterDuplicatesAndReverse(cr_releases)
@@ -100,7 +102,6 @@ def BuildRevisionRanges(cr_releases):
# Assume the chromium revisions are all different.
assert cr_from[0] != cr_to[0]
- # TODO(machenbach): Subtraction is not git friendly.
ran = "%s:%d" % (cr_from[0], int(cr_to[0]) - 1)
# Collect the ranges in lists per revision.
@@ -111,7 +112,7 @@ def BuildRevisionRanges(cr_releases):
range_lists.setdefault(cr_releases[-1][1], []).append(cr_releases[-1][0])
# Stringify and comma-separate the range lists.
- return dict((rev, ", ".join(ran)) for rev, ran in range_lists.iteritems())
+ return dict((hsh, ", ".join(ran)) for hsh, ran in range_lists.iteritems())
def MatchSafe(match):
@@ -136,7 +137,7 @@ class RetrieveV8Releases(Step):
return (self._options.max_releases > 0
and len(releases) > self._options.max_releases)
- def GetBleedingEdgeGitFromPush(self, title):
+ def GetMasterHashFromPush(self, title):
return MatchSafe(PUSH_MSG_GIT_RE.match(title))
def GetMergedPatches(self, body):
@@ -161,7 +162,7 @@ class RetrieveV8Releases(Step):
def GetReleaseDict(
- self, git_hash, bleeding_edge_rev, bleeding_edge_git, branch, version,
+ self, git_hash, master_position, master_hash, branch, version,
patches, cl_body):
revision = self.GetCommitPositionNumber(git_hash)
return {
@@ -170,9 +171,9 @@ class RetrieveV8Releases(Step):
# The git revision on the branch.
"revision_git": git_hash,
# The cr commit position number on master.
- "bleeding_edge": bleeding_edge_rev,
+ "master_position": master_position,
# The same for git.
- "bleeding_edge_git": bleeding_edge_git,
+ "master_hash": master_hash,
# The branch name.
"branch": branch,
# The version for displaying in the form 3.26.3 or 3.26.3.12.
@@ -185,8 +186,8 @@ class RetrieveV8Releases(Step):
"chromium_revision": "",
# Default for easier output formatting.
"chromium_branch": "",
- # Link to the CL on code review. Trunk pushes are not uploaded, so this
- # field will be populated below with the recent roll CL link.
+ # Link to the CL on code review. Candidates pushes are not uploaded,
+ # so this field will be populated below with the recent roll CL link.
"review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
# Link to the commit message on google code.
"revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
@@ -207,31 +208,20 @@ class RetrieveV8Releases(Step):
else:
patches = self.GetMergedPatches(body)
- title = self.GitLog(n=1, format="%s", git_hash=git_hash)
- bleeding_edge_git = self.GetBleedingEdgeGitFromPush(title)
- bleeding_edge_position = ""
- if bleeding_edge_git:
- bleeding_edge_position = self.GetCommitPositionNumber(bleeding_edge_git)
- # TODO(machenbach): Add the commit position number.
+ if SortingKey("4.2.69") <= SortingKey(version):
+ master_hash = self.GetLatestReleaseBase(version=version)
+ else:
+ # Legacy: Before version 4.2.69, the master revision was determined
+ # by commit message.
+ title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+ master_hash = self.GetMasterHashFromPush(title)
+ master_position = ""
+ if master_hash:
+ master_position = self.GetCommitPositionNumber(master_hash)
return self.GetReleaseDict(
- git_hash, bleeding_edge_position, bleeding_edge_git, branch, version,
+ git_hash, master_position, master_hash, branch, version,
patches, body), self["patch"]
- def GetReleasesFromMaster(self):
- # TODO(machenbach): Implement this in git as soon as we tag again on
- # master.
- # tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v
- # --limit 20")
- # releases = []
- # for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
- # git_hash = self.vc.SvnGit(revision)
-
- # Add bleeding edge release. It does not contain patches or a code
- # review link, as tags are not uploaded.
- # releases.append(self.GetReleaseDict(
- # git_hash, revision, git_hash, self.vc.MasterBranch(), tag, "", ""))
- return []
-
def GetReleasesFromBranch(self, branch):
self.GitReset(self.vc.RemoteBranch(branch))
if branch == self.vc.MasterBranch():
@@ -265,28 +255,58 @@ class RetrieveV8Releases(Step):
self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
return releases
+ def GetReleaseFromRevision(self, revision):
+ releases = []
+ try:
+ if (VERSION_FILE not in self.GitChangedFiles(revision) or
+ not self.GitCheckoutFileSafe(VERSION_FILE, revision)):
+ print "Skipping revision %s" % revision
+ return [] # pragma: no cover
+
+ branches = map(
+ str.strip,
+ self.Git("branch -r --contains %s" % revision).strip().splitlines(),
+ )
+ branch = ""
+ for b in branches:
+ if b.startswith("origin/"):
+ branch = b.split("origin/")[1]
+ break
+ if b.startswith("branch-heads/"):
+ branch = b.split("branch-heads/")[1]
+ break
+ else:
+ print "Could not determine branch for %s" % revision
+
+ release, _ = self.GetRelease(revision, branch)
+ releases.append(release)
+
+ # Allow Ctrl-C interrupt.
+ except (KeyboardInterrupt, SystemExit): # pragma: no cover
+ pass
+
+ # Clean up checked-out version file.
+ self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
+ return releases
+
+
def RunStep(self):
self.GitCreateBranch(self._config["BRANCHNAME"])
- branches = self.vc.GetBranches()
releases = []
if self._options.branch == 'recent':
- # Get only recent development on trunk, beta and stable.
- if self._options.max_releases == 0: # pragma: no cover
- self._options.max_releases = 10
- beta, stable = SortBranches(branches)[0:2]
- releases += self.GetReleasesFromBranch(stable)
- releases += self.GetReleasesFromBranch(beta)
- releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
- releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
+ # List every release from the last 7 days.
+ revisions = self.GetRecentReleases(max_age=7 * DAY_IN_SECONDS)
+ for revision in revisions:
+ releases += self.GetReleaseFromRevision(revision)
elif self._options.branch == 'all': # pragma: no cover
# Retrieve the full release history.
- for branch in branches:
+ for branch in self.vc.GetBranches():
releases += self.GetReleasesFromBranch(branch)
releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
else: # pragma: no cover
# Retrieve history for a specified branch.
- assert self._options.branch in (branches +
+ assert self._options.branch in (self.vc.GetBranches() +
[self.vc.CandidateBranch(), self.vc.MasterBranch()])
releases += self.GetReleasesFromBranch(self._options.branch)
@@ -295,27 +315,16 @@ class RetrieveV8Releases(Step):
reverse=True)
-class SwitchChromium(Step):
- MESSAGE = "Switch to Chromium checkout."
-
- def RunStep(self):
- cwd = self._options.chromium
- # Check for a clean workdir.
- if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
- self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Assert that the DEPS file is there.
- if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
- self.Die("DEPS file not present.")
-
-
class UpdateChromiumCheckout(Step):
- MESSAGE = "Update the checkout and create a new branch."
+ MESSAGE = "Update the chromium checkout."
def RunStep(self):
cwd = self._options.chromium
- self.GitCheckout("master", cwd=cwd)
- self.GitPull(cwd=cwd)
- self.GitCreateBranch(self.Config("BRANCHNAME"), cwd=cwd)
+ self.GitFetchOrigin("+refs/heads/*:refs/remotes/origin/*",
+ "+refs/branch-heads/*:refs/remotes/branch-heads/*",
+ cwd=cwd)
+ # Update v8 checkout in chromium.
+ self.GitFetchOrigin(cwd=os.path.join(cwd, "v8"))
def ConvertToCommitNumber(step, revision):
@@ -331,68 +340,56 @@ class RetrieveChromiumV8Releases(Step):
def RunStep(self):
cwd = self._options.chromium
- releases = filter(
- lambda r: r["branch"] in [self.vc.CandidateBranch(),
- self.vc.MasterBranch()],
- self["releases"])
- if not releases: # pragma: no cover
- print "No releases detected. Skipping chromium history."
- return True
-
- # Update v8 checkout in chromium.
- self.GitFetchOrigin(cwd=os.path.join(cwd, "v8"))
- oldest_v8_rev = int(releases[-1]["revision"])
+ # All v8 revisions we are interested in.
+ releases_dict = dict((r["revision_git"], r) for r in self["releases"])
cr_releases = []
+ count_past_last_v8 = 0
try:
for git_hash in self.GitLog(
- format="%H", grep="V8", cwd=cwd).splitlines():
- if "DEPS" not in self.GitChangedFiles(git_hash, cwd=cwd):
- continue
- if not self.GitCheckoutFileSafe("DEPS", git_hash, cwd=cwd):
- break # pragma: no cover
- deps = FileToText(os.path.join(cwd, "DEPS"))
+ format="%H", grep="V8", branch="origin/master",
+ path="DEPS", cwd=cwd).splitlines():
+ deps = self.GitShowFile(git_hash, "DEPS", cwd=cwd)
match = DEPS_RE.search(deps)
if match:
cr_rev = self.GetCommitPositionNumber(git_hash, cwd=cwd)
if cr_rev:
- v8_rev = ConvertToCommitNumber(self, match.group(1))
- cr_releases.append([cr_rev, v8_rev])
+ v8_hsh = match.group(1)
+ cr_releases.append([cr_rev, v8_hsh])
+
+ if count_past_last_v8:
+ count_past_last_v8 += 1 # pragma: no cover
- # Stop after reaching beyond the last v8 revision we want to update.
- # We need a small buffer for possible revert/reland frenzies.
- # TODO(machenbach): Subtraction is not git friendly.
- if int(v8_rev) < oldest_v8_rev - 100:
+ if count_past_last_v8 > 20:
break # pragma: no cover
+ # Stop as soon as we find a v8 revision that we didn't fetch in the
+ # v8-revision-retrieval part above (i.e. a revision that's too old).
+ # Just iterate a few more times in case there were reverts.
+ if v8_hsh not in releases_dict:
+ count_past_last_v8 += 1 # pragma: no cover
+
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
- # Clean up.
- self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
-
- # Add the chromium ranges to the v8 trunk and bleeding_edge releases.
+ # Add the chromium ranges to the v8 candidates and master releases.
all_ranges = BuildRevisionRanges(cr_releases)
- releases_dict = dict((r["revision"], r) for r in releases)
- for revision, ranges in all_ranges.iteritems():
- releases_dict.get(revision, {})["chromium_revision"] = ranges
+
+ for hsh, ranges in all_ranges.iteritems():
+ releases_dict.get(hsh, {})["chromium_revision"] = ranges
# TODO(machenbach): Unify common code with method above.
-class RietrieveChromiumBranches(Step):
+class RetrieveChromiumBranches(Step):
MESSAGE = "Retrieve Chromium branch information."
def RunStep(self):
cwd = self._options.chromium
- trunk_releases = filter(lambda r: r["branch"] == self.vc.CandidateBranch(),
- self["releases"])
- if not trunk_releases: # pragma: no cover
- print "No trunk releases detected. Skipping chromium history."
- return True
- oldest_v8_rev = int(trunk_releases[-1]["revision"])
+ # All v8 revisions we are interested in.
+ releases_dict = dict((r["revision_git"], r) for r in self["releases"])
# Filter out irrelevant branches.
branches = filter(lambda r: re.match(r"branch-heads/\d+", r),
@@ -405,44 +402,108 @@ class RietrieveChromiumBranches(Step):
branches = sorted(branches, reverse=True)
cr_branches = []
+ count_past_last_v8 = 0
try:
for branch in branches:
- if not self.GitCheckoutFileSafe("DEPS",
- "branch-heads/%d" % branch,
- cwd=cwd):
- break # pragma: no cover
- deps = FileToText(os.path.join(cwd, "DEPS"))
+ deps = self.GitShowFile(
+ "refs/branch-heads/%d" % branch, "DEPS", cwd=cwd)
match = DEPS_RE.search(deps)
if match:
- v8_rev = ConvertToCommitNumber(self, match.group(1))
- cr_branches.append([str(branch), v8_rev])
+ v8_hsh = match.group(1)
+ cr_branches.append([str(branch), v8_hsh])
+
+ if count_past_last_v8:
+ count_past_last_v8 += 1 # pragma: no cover
- # Stop after reaching beyond the last v8 revision we want to update.
- # We need a small buffer for possible revert/reland frenzies.
- # TODO(machenbach): Subtraction is not git friendly.
- if int(v8_rev) < oldest_v8_rev - 100:
+ if count_past_last_v8 > 20:
break # pragma: no cover
+ # Stop as soon as we find a v8 revision that we didn't fetch in the
+ # v8-revision-retrieval part above (i.e. a revision that's too old).
+ # Just iterate a few more times in case there were reverts.
+ if v8_hsh not in releases_dict:
+ count_past_last_v8 += 1 # pragma: no cover
+
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
- # Clean up.
- self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
-
- # Add the chromium branches to the v8 trunk releases.
+ # Add the chromium branches to the v8 candidate releases.
all_ranges = BuildRevisionRanges(cr_branches)
- trunk_dict = dict((r["revision"], r) for r in trunk_releases)
for revision, ranges in all_ranges.iteritems():
- trunk_dict.get(revision, {})["chromium_branch"] = ranges
+ releases_dict.get(revision, {})["chromium_branch"] = ranges
+
+
+class RetrieveInformationOnChromeReleases(Step):
+ MESSAGE = "Retrieve relevant information on the latest Chrome releases."
+
+ def Run(self):
+
+ params = None
+ result_raw = self.ReadURL(
+ OMAHA_PROXY_URL + "all.json",
+ params,
+ wait_plan=[5, 20]
+ )
+ recent_releases = json.loads(result_raw)
+
+ canaries = []
+
+ for current_os in recent_releases:
+ for current_version in current_os["versions"]:
+ if current_version["channel"] != "canary":
+ continue
+
+ current_candidate = self._CreateCandidate(current_version)
+ canaries.append(current_candidate)
+
+ chrome_releases = {"canaries": canaries}
+ self["chrome_releases"] = chrome_releases
+
+ def _GetGitHashForV8Version(self, v8_version):
+ if v8_version == "N/A":
+ return ""
+
+ real_v8_version = v8_version
+ if v8_version.split(".")[3] == "0":
+ real_v8_version = v8_version[:-2]
+
+ try:
+ return self.GitGetHashOfTag(real_v8_version)
+ except GitFailedException:
+ return ""
+
+ def _CreateCandidate(self, current_version):
+ params = None
+ url_to_call = (OMAHA_PROXY_URL + "v8.json?version="
+ + current_version["previous_version"])
+ result_raw = self.ReadURL(
+ url_to_call,
+ params,
+ wait_plan=[5, 20]
+ )
+ previous_v8_version = json.loads(result_raw)["v8_version"]
+ v8_previous_version_hash = self._GetGitHashForV8Version(previous_v8_version)
+
+ current_v8_version = current_version["v8_version"]
+ v8_version_hash = self._GetGitHashForV8Version(current_v8_version)
+
+ current_candidate = {
+ "chrome_version": current_version["version"],
+ "os": current_version["os"],
+ "release_date": current_version["current_reldate"],
+ "v8_version": current_v8_version,
+ "v8_version_hash": v8_version_hash,
+ "v8_previous_version": previous_v8_version,
+ "v8_previous_version_hash": v8_previous_version_hash,
+ }
+ return current_candidate
class CleanUp(Step):
MESSAGE = "Clean up."
def RunStep(self):
- self.GitCheckout("master", cwd=self._options.chromium)
- self.GitDeleteBranch(self.Config("BRANCHNAME"), cwd=self._options.chromium)
self.CommonCleanup()
@@ -450,6 +511,12 @@ class WriteOutput(Step):
MESSAGE = "Print output."
def Run(self):
+
+ output = {
+ "releases": self["releases"],
+ "chrome_releases": self["chrome_releases"],
+ }
+
if self._options.csv:
with open(self._options.csv, "w") as f:
writer = csv.DictWriter(f,
@@ -461,9 +528,9 @@ class WriteOutput(Step):
writer.writerow(release)
if self._options.json:
with open(self._options.json, "w") as f:
- f.write(json.dumps(self["releases"]))
+ f.write(json.dumps(output))
if not self._options.csv and not self._options.json:
- print self["releases"] # pragma: no cover
+ print output # pragma: no cover
class Releases(ScriptsBase):
@@ -471,7 +538,8 @@ class Releases(ScriptsBase):
parser.add_argument("-b", "--branch", default="recent",
help=("The branch to analyze. If 'all' is specified, "
"analyze all branches. If 'recent' (default) "
- "is specified, track beta, stable and trunk."))
+ "is specified, track beta, stable and "
+ "candidates."))
parser.add_argument("-c", "--chromium",
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
@@ -481,6 +549,7 @@ class Releases(ScriptsBase):
parser.add_argument("--json", help="Path to a JSON file for export.")
def _ProcessOptions(self, options): # pragma: no cover
+ options.force_readline_defaults = True
return True
def _Config(self):
@@ -490,13 +559,14 @@ class Releases(ScriptsBase):
}
def _Steps(self):
+
return [
Preparation,
RetrieveV8Releases,
- SwitchChromium,
UpdateChromiumCheckout,
RetrieveChromiumV8Releases,
- RietrieveChromiumBranches,
+ RetrieveChromiumBranches,
+ RetrieveInformationOnChromeReleases,
CleanUp,
WriteOutput,
]
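
With releases now keyed by git hash, BuildRevisionRanges maps each v8 hash to the Chromium commit-position range(s) that shipped it. A worked example using BuildRevisionRanges as defined above (hashes are hypothetical):

    cr_releases = [  # [cr_rev, v8_hsh], reverse-sorted by cr_rev
        ["330000", "deadbeef"],
        ["329000", "cafebabe"],
        ["327000", "f00dface"],
    ]
    print BuildRevisionRanges(cr_releases)
    # -> {'f00dface': '327000:328999', 'cafebabe': '329000:329999',
    #     'deadbeef': '330000'}   (dict order may vary)
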
diff --git a/tools/push-to-trunk/script_test.py b/tools/release/script_test.py
index cbb2134f..cbb2134f 100755
--- a/tools/push-to-trunk/script_test.py
+++ b/tools/release/script_test.py
diff --git a/tools/release/search_related_commits.py b/tools/release/search_related_commits.py
new file mode 100755
index 00000000..d27aa56f
--- /dev/null
+++ b/tools/release/search_related_commits.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import operator
+import os
+import re
+from sets import Set
+from subprocess import Popen, PIPE
+import sys
+
+def search_all_related_commits(
+ git_working_dir, start_hash, until, separator, verbose=False):
+
+ all_commits_raw = _find_commits_inbetween(
+ start_hash, until, git_working_dir, verbose)
+ if verbose:
+ print "All commits between <of> and <until>: " + all_commits_raw
+
+ # Adding start hash too
+ all_commits = [start_hash]
+ all_commits.extend(all_commits_raw.splitlines())
+ all_related_commits = {}
+ already_treated_commits = Set([])
+ for commit in all_commits:
+ if commit in already_treated_commits:
+ continue
+
+ related_commits = _search_related_commits(
+ git_working_dir, commit, until, separator, verbose)
+ if len(related_commits) > 0:
+ all_related_commits[commit] = related_commits
+ already_treated_commits.update(related_commits)
+
+ already_treated_commits.add(commit)
+
+ return all_related_commits
+
+def _search_related_commits(
+ git_working_dir, start_hash, until, separator, verbose=False):
+
+ if separator:
+ commits_between = _find_commits_inbetween(
+ start_hash, separator, git_working_dir, verbose)
+ if commits_between == "":
+ return []
+
+ # Extract commit position
+ original_message = git_execute(
+ git_working_dir,
+ ["show", "-s", "--format=%B", start_hash],
+ verbose)
+ title = original_message.splitlines()[0]
+
+ matches = re.search(r"(\{#)([0-9]*)(\})", original_message)
+
+ if not matches:
+ return []
+
+ commit_position = matches.group(2)
+ if verbose:
+ print "1.) Commit position to look for: " + commit_position
+
+ search_range = start_hash + ".." + until
+
+ def git_args(grep_pattern):
+ return [
+ "log",
+ "--reverse",
+ "--grep=" + grep_pattern,
+ "--format=%H",
+ search_range,
+ ]
+
+ found_by_hash = git_execute(
+ git_working_dir, git_args(start_hash), verbose).strip()
+
+ if verbose:
+ print "2.) Found by hash: " + found_by_hash
+
+ found_by_commit_pos = git_execute(
+ git_working_dir, git_args(commit_position), verbose).strip()
+
+ if verbose:
+ print "3.) Found by commit position: " + found_by_commit_pos
+
+ # Replace brackets or else they are wrongly interpreted by --grep
+ title = title.replace("[", "\\[")
+ title = title.replace("]", "\\]")
+
+ found_by_title = git_execute(
+ git_working_dir, git_args(title), verbose).strip()
+
+ if verbose:
+ print "4.) Found by title: " + found_by_title
+
+ hits = (
+ _convert_to_array(found_by_hash) +
+ _convert_to_array(found_by_commit_pos) +
+ _convert_to_array(found_by_title))
+ hits = _remove_duplicates(hits)
+
+ if separator:
+ for current_hit in hits:
+ commits_between = _find_commits_inbetween(
+ separator, current_hit, git_working_dir, verbose)
+ if commits_between != "":
+ return hits
+ return []
+
+ return hits
+
+def _find_commits_inbetween(start_hash, end_hash, git_working_dir, verbose):
+ commits_between = git_execute(
+ git_working_dir,
+ ["rev-list", "--reverse", start_hash + ".." + end_hash],
+ verbose)
+ return commits_between.strip()
+
+def _convert_to_array(string_of_hashes):
+ return string_of_hashes.splitlines()
+
+def _remove_duplicates(array):
+ no_duplicates = []
+ for current in array:
+ if not current in no_duplicates:
+ no_duplicates.append(current)
+ return no_duplicates
+
+def git_execute(working_dir, args, verbose=False):
+ command = ["git", "-C", working_dir] + args
+ if verbose:
+ print "Git working dir: " + working_dir
+ print "Executing git command:" + str(command)
+ p = Popen(args=command, stdin=PIPE,
+ stdout=PIPE, stderr=PIPE)
+ output, err = p.communicate()
+ rc = p.returncode
+ if rc != 0:
+ raise Exception(err)
+ if verbose:
+ print "Git return value: " + output
+ return output
+
+def _pretty_print_entry(hash, git_dir, pre_text, verbose):
+ text_to_print = git_execute(
+ git_dir,
+ ["show",
+ "--quiet",
+ "--date=iso",
+ hash,
+ "--format=%ad # %H # %s"],
+ verbose)
+ return pre_text + text_to_print.strip()
+
+def main(options):
+ all_related_commits = search_all_related_commits(
+ options.git_dir,
+ options.of[0],
+ options.until[0],
+ options.separator,
+ options.verbose)
+
+ sort_key = lambda x: (
+ git_execute(
+ options.git_dir,
+ ["show", "--quiet", "--date=iso", x, "--format=%ad"],
+ options.verbose)).strip()
+
+ high_level_commits = sorted(all_related_commits.keys(), key=sort_key)
+
+ for current_key in high_level_commits:
+ if options.prettyprint:
+ yield _pretty_print_entry(
+ current_key,
+ options.git_dir,
+ "+",
+ options.verbose)
+ else:
+ yield "+" + current_key
+
+ found_commits = all_related_commits[current_key]
+ for current_commit in found_commits:
+ if options.prettyprint:
+ yield _pretty_print_entry(
+ current_commit,
+ options.git_dir,
+ "| ",
+ options.verbose)
+ else:
+ yield "| " + current_commit
+
+if __name__ == "__main__": # pragma: no cover
+ parser = argparse.ArgumentParser(
+ description="This tool analyzes the commit range between <of> and "
+ "<until>. It finds commits which belong together, e.g. Implement/Revert "
+ "pairs and Implement/Port/Revert triples. All supplied hashes need to "
+ "be from the same branch, e.g. master.")
+ parser.add_argument("-g", "--git-dir", required=False, default=".",
+ help="The path to your git working directory.")
+ parser.add_argument("--verbose", action="store_true",
+ help="Enables a very verbose output")
+ parser.add_argument("of", nargs=1,
+ help="Hash of the commit to be searched.")
+ parser.add_argument("until", nargs=1,
+ help="Commit when searching should stop")
+ parser.add_argument("--separator", required=False,
+ help="The script will only list related commits "
+ "which are separated by hash <--separator>.")
+ parser.add_argument("--prettyprint", action="store_true",
+ help="Pretty prints the output")
+
+ args = sys.argv[1:]
+ options = parser.parse_args(args)
+ for current_line in main(options):
+ print current_line
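
The relation search above greps the commit range three times: by hash, by the {#NNNNN} commit position, and by the escaped title. A sketch of the position extraction feeding the second grep (the message text is hypothetical):

    import re

    msg = "Fix foo\n\nCr-Commit-Position: refs/heads/master@{#29618}"
    match = re.search(r"(\{#)([0-9]*)(\})", msg)
    print match.group(2)  # -> 29618; used for the second --grep pass
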
diff --git a/tools/release/test_mergeinfo.py b/tools/release/test_mergeinfo.py
new file mode 100755
index 00000000..d455fa23
--- /dev/null
+++ b/tools/release/test_mergeinfo.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import mergeinfo
+import shutil
+import unittest
+
+from collections import namedtuple
+from os import path
+from subprocess import Popen, PIPE, check_call
+
+TEST_CONFIG = {
+ "GIT_REPO": "/tmp/test-v8-search-related-commits",
+}
+
+class TestMergeInfo(unittest.TestCase):
+
+ base_dir = TEST_CONFIG["GIT_REPO"]
+
+ def _execute_git(self, git_args):
+ full_command = ["git", "-C", self.base_dir] + git_args
+ p = Popen(args=full_command, stdin=PIPE,
+ stdout=PIPE, stderr=PIPE)
+ output, err = p.communicate()
+ rc = p.returncode
+ if rc != 0:
+ raise Exception(err)
+ return output
+
+ def setUp(self):
+ if path.exists(self.base_dir):
+ shutil.rmtree(self.base_dir)
+
+ check_call(["git", "init", self.base_dir])
+
+ # Initial commit
+ message = 'Initial commit'
+
+ self._make_empty_commit(message)
+
+ def tearDown(self):
+ if path.exists(self.base_dir):
+ shutil.rmtree(self.base_dir)
+
+ def _assert_correct_standard_result(
+ self, result, all_commits, hash_of_first_commit):
+ self.assertEqual(len(result), 1, "Master commit not found")
+ self.assertTrue(
+ result.get(hash_of_first_commit),
+ "Master commit is wrong")
+
+ self.assertEqual(
+ len(result[hash_of_first_commit]),
+ 1,
+ "Child commit not found")
+ self.assertEqual(
+ all_commits[2],
+ result[hash_of_first_commit][0],
+ "Child commit wrong")
+
+ def _get_commits(self):
+ commits = self._execute_git(
+ ["log", "--format=%H", "--reverse"]).splitlines()
+ return commits
+
+ def _make_empty_commit(self, message):
+ self._execute_git(["commit", "--allow-empty", "-m", message])
+ return self._get_commits()[-1]
+
+ def testCanDescribeCommit(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = mergeinfo.describe_commit(
+ self.base_dir,
+ hash_of_first_commit).splitlines()
+
+ self.assertEqual(
+ result[0],
+ 'commit ' + hash_of_first_commit)
+
+ def testCanDescribeCommitSingleLine(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = mergeinfo.describe_commit(
+ self.base_dir,
+ hash_of_first_commit, True).splitlines()
+
+ self.assertEqual(
+ str(result[0]),
+ str(hash_of_first_commit[0:7]) + ' Initial commit')
+
+ def testSearchFollowUpCommits(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ message = 'Follow-up commit of ' + hash_of_first_commit
+ self._make_empty_commit(message)
+ self._make_empty_commit(message)
+ self._make_empty_commit(message)
+ commits = self._get_commits()
+ message = 'Not related commit'
+ self._make_empty_commit(message)
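+ # 'commits' was captured before the unrelated commit, so commits[1:]
+ # holds exactly the three follow-ups.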
+
+ followups = mergeinfo.get_followup_commits(
+ self.base_dir,
+ hash_of_first_commit)
+ self.assertEqual(set(followups), set(commits[1:]))
+
+ def testSearchMerges(self):
+ self._execute_git(['branch', 'test'])
+ self._execute_git(['checkout', 'master'])
+ message = 'real initial commit'
+ self._make_empty_commit(message)
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ self._execute_git(['checkout', 'test'])
+ message = 'Not related commit'
+ self._make_empty_commit(message)
+
+ # This should be found
+ message = 'Merge ' + hash_of_first_commit
+ hash_of_hit = self._make_empty_commit(message)
+
+ # This should be ignored
+ message = 'Cr-Branched-From: ' + hash_of_first_commit
+ hash_of_ignored = self._make_empty_commit(message)
+
+ self._execute_git(['checkout', 'master'])
+
+ followups = mergeinfo.get_followup_commits(
+ self.base_dir,
+ hash_of_first_commit)
+
+ # Check that follow-ups and merges do not overlap.
+ self.assertEqual(len(followups), 0)
+
+ message = 'Follow-up commit of ' + hash_of_first_commit
+ hash_of_followup = self._make_empty_commit(message)
+
+ merges = mergeinfo.get_merge_commits(self.base_dir, hash_of_first_commit)
+ # Check that the plain follow-up is not reported as a merge.
+ self.assertTrue(hash_of_followup not in merges)
+
+ # Check that merges are reported correctly.
+ self.assertTrue(hash_of_hit in merges)
+ self.assertTrue(hash_of_ignored not in merges)
+
+ def testIsLkgr(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+ self._make_empty_commit('This one is the lkgr head')
+ self._execute_git(['branch', 'remotes/origin/lkgr'])
+ hash_of_not_lkgr = self._make_empty_commit('This one is not yet lkgr')
+
+ self.assertTrue(mergeinfo.is_lkgr(
+ self.base_dir, hash_of_first_commit))
+ self.assertFalse(mergeinfo.is_lkgr(
+ self.base_dir, hash_of_not_lkgr))
+
+ def testShowFirstCanary(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ self.assertEqual(mergeinfo.get_first_canary(
+ self.base_dir, hash_of_first_commit), 'No Canary coverage')
+
+ self._execute_git(['branch', 'remotes/origin/chromium/2345'])
+ self._execute_git(['branch', 'remotes/origin/chromium/2346'])
+
+ self.assertEqual(mergeinfo.get_first_canary(
+ self.base_dir, hash_of_first_commit), '2345')
+
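+# From the expectations above, get_first_canary apparently reports the
+# lowest-numbered remote chromium/<N> branch that covers the commit, or a
+# fallback string. A sketch of that assumed behavior (not the actual
+# implementation in mergeinfo.py):
+#
+#   def get_first_canary(git_dir, commit_hash):
+#     branches = git_execute(
+#         git_dir, ['branch', '-r', '--contains', commit_hash])
+#     canaries = sorted(int(b.split('/')[-1]) for b in branches.splitlines()
+#                       if 'chromium/' in b)
+#     return str(canaries[0]) if canaries else 'No Canary coverage'
+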
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/push-to-trunk/test_scripts.py b/tools/release/test_scripts.py
index db702a3a..4a3cb5b2 100644
--- a/tools/push-to-trunk/test_scripts.py
+++ b/tools/release/test_scripts.py
@@ -33,39 +33,37 @@ import traceback
import unittest
import auto_push
-from auto_push import CheckLastPush
+from auto_push import LastReleaseBailout
import auto_roll
import common_includes
from common_includes import *
+import create_release
+from create_release import CreateRelease
import merge_to_branch
from merge_to_branch import *
-import push_to_trunk
-from push_to_trunk import *
+import push_to_candidates
+from push_to_candidates import *
import chromium_roll
from chromium_roll import ChromiumRoll
import releases
from releases import Releases
-import bump_up_version
-from bump_up_version import BumpUpVersion
-from bump_up_version import LastChangeBailout
-from bump_up_version import LKGRVersionUpToDateBailout
from auto_tag import AutoTag
TEST_CONFIG = {
"DEFAULT_CWD": None,
"BRANCHNAME": "test-prepare-push",
- "TRUNKBRANCH": "test-trunk-push",
- "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-trunk-tempfile",
- "CHANGELOG_ENTRY_FILE": "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
- "PATCH_FILE": "/tmp/test-v8-push-to-trunk-tempfile-patch",
- "COMMITMSG_FILE": "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
- "CHROMIUM": "/tmp/test-v8-push-to-trunk-tempfile-chromium",
+ "CANDIDATESBRANCH": "test-candidates-push",
+ "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-candidates-tempfile",
+ "CHANGELOG_ENTRY_FILE":
+ "/tmp/test-v8-push-to-candidates-tempfile-changelog-entry",
+ "PATCH_FILE": "/tmp/test-v8-push-to-candidates-tempfile-patch",
+ "COMMITMSG_FILE": "/tmp/test-v8-push-to-candidates-tempfile-commitmsg",
+ "CHROMIUM": "/tmp/test-v8-push-to-candidates-tempfile-chromium",
"SETTINGS_LOCATION": None,
"ALREADY_MERGING_SENTINEL_FILE":
"/tmp/test-merge-to-branch-tempfile-already-merging",
"TEMPORARY_PATCH_FILE": "/tmp/test-merge-to-branch-tempfile-temporary-patch",
- "CLUSTERFUZZ_API_KEY_FILE": "/tmp/test-fake-cf-api-key",
}
@@ -76,6 +74,30 @@ AUTO_PUSH_ARGS = [
class ToplevelTest(unittest.TestCase):
+ def testSanitizeVersionTags(self):
+ self.assertEquals("4.8.230", SanitizeVersionTag("4.8.230"))
+ self.assertEquals("4.8.230", SanitizeVersionTag("tags/4.8.230"))
+ self.assertEquals(None, SanitizeVersionTag("candidate"))
+
+ def testNormalizeVersionTags(self):
+ input = ["4.8.230",
+ "tags/4.8.230",
+ "tags/4.8.224.1",
+ "4.8.224.1",
+ "4.8.223.1",
+ "tags/4.8.223",
+ "tags/4.8.231",
+ "candidates"]
+ expected = ["4.8.230",
+ "4.8.230",
+ "4.8.224.1",
+ "4.8.224.1",
+ "4.8.223.1",
+ "4.8.223",
+ "4.8.231",
+ ]
+ self.assertEquals(expected, NormalizeVersionTags(input_tags))
+
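+ # Based on the expectations above, a minimal sketch of the helpers under
+ # test (assumed behavior, not the actual implementation):
+ #
+ #   VERSION_RE = re.compile(r"^\d+\.\d+\.\d+(\.\d+)?$")
+ #
+ #   def SanitizeVersionTag(tag):
+ #     version = tag[len("tags/"):] if tag.startswith("tags/") else tag
+ #     return version if VERSION_RE.match(version) else None
+ #
+ #   def NormalizeVersionTags(tags):
+ #     return [v for v in map(SanitizeVersionTag, tags) if v is not None]
+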
def testSortBranches(self):
S = releases.SortBranches
self.assertEquals(["3.1", "2.25"], S(["2.25", "3.1"])[0:2])
@@ -355,19 +377,19 @@ class ScriptTest(unittest.TestCase):
return name
- def WriteFakeVersionFile(self, minor=22, build=4, patch=0):
+ def WriteFakeVersionFile(self, major=3, minor=22, build=4, patch=0):
version_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE)
if not os.path.exists(os.path.dirname(version_file)):
os.makedirs(os.path.dirname(version_file))
with open(version_file, "w") as f:
f.write(" // Some line...\n")
f.write("\n")
- f.write("#define MAJOR_VERSION 3\n")
- f.write("#define MINOR_VERSION %s\n" % minor)
- f.write("#define BUILD_NUMBER %s\n" % build)
- f.write("#define PATCH_LEVEL %s\n" % patch)
+ f.write("#define V8_MAJOR_VERSION %s\n" % major)
+ f.write("#define V8_MINOR_VERSION %s\n" % minor)
+ f.write("#define V8_BUILD_NUMBER %s\n" % build)
+ f.write("#define V8_PATCH_LEVEL %s\n" % patch)
f.write(" // Some line...\n")
- f.write("#define IS_CANDIDATE_VERSION 0\n")
+ f.write("#define V8_IS_CANDIDATE_VERSION 0\n")
def MakeStep(self):
"""Convenience wrapper."""
@@ -376,7 +398,7 @@ class ScriptTest(unittest.TestCase):
config=TEST_CONFIG, side_effect_handler=self,
options=options)
- def RunStep(self, script=PushToTrunk, step_class=Step, args=None):
+ def RunStep(self, script=PushToCandidates, step_class=Step, args=None):
"""Convenience wrapper."""
args = args if args is not None else ["-m"]
return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
@@ -398,11 +420,6 @@ class ScriptTest(unittest.TestCase):
else:
return self._mock.Call("readurl", url)
- def ReadClusterFuzzAPI(self, api_key, **params):
- # TODO(machenbach): Use a mock for this and add a test that stops rolling
- # due to clustefuzz results.
- return []
-
def Sleep(self, seconds):
pass
@@ -410,7 +427,7 @@ class ScriptTest(unittest.TestCase):
return "1999-07-31"
def GetUTCStamp(self):
- return "100000"
+ return "1000000"
def Expect(self, *args):
"""Convenience wrapper."""
@@ -444,7 +461,7 @@ class ScriptTest(unittest.TestCase):
def testCommonPrepareDefault(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
@@ -452,24 +469,22 @@ class ScriptTest(unittest.TestCase):
])
self.MakeStep().CommonPrepare()
self.MakeStep().PrepareBranch()
- self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareNoConfirm(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("n"),
])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
- self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareDeleteBranchFailure(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
@@ -477,7 +492,6 @@ class ScriptTest(unittest.TestCase):
])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
- self.assertEquals("some_branch", self._state["current_branch"])
def testInitialEnvironmentChecks(self):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
@@ -529,10 +543,10 @@ class ScriptTest(unittest.TestCase):
" too much\n"
" trailing", cl)
- self.assertEqual("//\n#define BUILD_NUMBER 3\n",
- MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
+ self.assertEqual("//\n#define V8_BUILD_NUMBER 3\n",
+ MSub(r"(?<=#define V8_BUILD_NUMBER)(?P<space>\s+)\d*$",
r"\g<space>3",
- "//\n#define BUILD_NUMBER 321\n"))
+ "//\n#define V8_BUILD_NUMBER 321\n"))
def testPreparePushRevision(self):
# Tests the default push hash used when the --revision option is not set.
@@ -540,7 +554,7 @@ class ScriptTest(unittest.TestCase):
Cmd("git log -1 --format=%H HEAD", "push_hash")
])
- self.RunStep(PushToTrunk, PreparePushRevision)
+ self.RunStep(PushToCandidates, PreparePushRevision)
self.assertEquals("push_hash", self._state["push_hash"])
def testPrepareChangeLog(self):
@@ -567,10 +581,10 @@ class ScriptTest(unittest.TestCase):
Cmd("git log -1 --format=%an rev4", "author4@chromium.org"),
])
- self._state["last_push_bleeding_edge"] = "1234"
+ self._state["last_push_master"] = "1234"
self._state["push_hash"] = "push_hash"
self._state["version"] = "3.22.5"
- self.RunStep(PushToTrunk, PrepareChangeLog)
+ self.RunStep(PushToCandidates, PrepareChangeLog)
actual_cl = FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
@@ -611,27 +625,30 @@ class ScriptTest(unittest.TestCase):
Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""),
])
- self.RunStep(PushToTrunk, EditChangeLog)
+ self.RunStep(PushToCandidates, EditChangeLog)
self.assertEquals("New\n Lines",
FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"]))
- # Version on trunk: 3.22.4.0. Version on master (bleeding_edge): 3.22.6.
- # Make sure that the increment is 3.22.7.0.
- def testIncrementVersion(self):
- self.WriteFakeVersionFile()
- self._state["last_push_trunk"] = "hash1"
- self._state["latest_build"] = "6"
- self._state["latest_version"] = "3.22.6.0"
+ TAGS = """
+4425.0
+0.0.0.0
+3.9.6
+3.22.4
+test_tag
+"""
+ # Version as tag: 3.22.4.0. Version on master: 3.22.6.
+ # Make sure that the latest version is 3.22.6.0.
+ def testIncrementVersion(self):
self.Expect([
- Cmd("git checkout -f hash1 -- src/version.cc", ""),
- Cmd("git checkout -f origin/master -- src/version.cc",
- "", cb=lambda: self.WriteFakeVersionFile(22, 6)),
- RL("Y"), # Increment build number.
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git checkout -f origin/master -- include/v8-version.h",
+ "", cb=lambda: self.WriteFakeVersionFile(3, 22, 6)),
])
- self.RunStep(PushToTrunk, IncrementVersion)
+ self.RunStep(PushToCandidates, IncrementVersion)
self.assertEquals("3", self._state["new_major"])
self.assertEquals("22", self._state["new_minor"])
@@ -650,7 +667,7 @@ class ScriptTest(unittest.TestCase):
self._state["push_hash"] = "hash1"
self._state["date"] = "1999-11-11"
- self.RunStep(PushToTrunk, SquashCommits)
+ self.RunStep(PushToCandidates, SquashCommits)
self.assertEquals(FileToText(TEST_CONFIG["COMMITMSG_FILE"]), expected_msg)
patch = FileToText(TEST_CONFIG["PATCH_FILE"])
@@ -704,49 +721,59 @@ Performance and stability improvements on all platforms."""
])
FakeScript(fake_config, self).Run(["--work-dir", work_dir])
- def _PushToTrunk(self, force=False, manual=False):
+ def _PushToCandidates(self, force=False, manual=False):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- # The version file on bleeding edge has build level 5, while the version
- # file from trunk has build level 4.
+ # The version file on master has build level 5, while the version
+ # file from candidates has build level 4.
self.WriteFakeVersionFile(build=5)
TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
- bleeding_edge_change_log = "2014-03-17: Sentinel\n"
- TextToFile(bleeding_edge_change_log,
+ master_change_log = "2014-03-17: Sentinel\n"
+ TextToFile(master_change_log,
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
os.environ["EDITOR"] = "vi"
+ commit_msg_squashed = """Version 3.22.5 (squashed - based on push_hash)
+
+Log text 1 (issue 321).
+
+Performance and stability improvements on all platforms."""
+
+ commit_msg = """Version 3.22.5 (based on push_hash)
+
+Log text 1 (issue 321).
+
+Performance and stability improvements on all platforms."""
+
def ResetChangeLog():
- """On 'git co -b new_branch svn/trunk', and 'git checkout -- ChangeLog',
- the ChangLog will be reset to its content on trunk."""
- trunk_change_log = """1999-04-05: Version 3.22.4
+ """On 'git co -b new_branch origin/candidates',
+ and 'git checkout -- ChangeLog',
+ the ChangLog will be reset to its content on candidates."""
+ candidates_change_log = """1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n"""
- TextToFile(trunk_change_log,
+ TextToFile(candidates_change_log,
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
- def ResetToTrunk():
+ def ResetToCandidates():
ResetChangeLog()
self.WriteFakeVersionFile()
- def CheckSVNCommit():
+ def CheckVersionCommit():
commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
- self.assertEquals(
-"""Version 3.22.5 (based on push_hash)
-
-Log text 1 (issue 321).
-
-Performance and stability improvements on all platforms.""", commit)
+ self.assertEquals(commit_msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
- self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
- self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
- self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
- self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
- self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
-
- # Check that the change log on the trunk branch got correctly modified.
+ self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
+ self.assertFalse(re.search(r"#define V8_BUILD_NUMBER\s+6", version))
+ self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+0", version))
+ self.assertTrue(
+ re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
+
+ # Check that the change log on the candidates branch got correctly
+ # modified.
change_log = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertEquals(
@@ -768,31 +795,19 @@ Performance and stability improvements on all platforms.""", commit)
expectations.append(Cmd("which vi", "/usr/bin/vi"))
expectations += [
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd(("git new-branch %s --upstream origin/master" %
- TEST_CONFIG["BRANCHNAME"]),
- ""),
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
- "origin/candidates"), "hash2\n"),
- Cmd("git log -1 hash2", "Log message\n"),
- ]
- if manual:
- expectations.append(RL("Y")) # Confirm last push.
- expectations += [
- Cmd("git log -1 --format=%s hash2",
- "Version 3.4.5 (based on abc3)\n"),
- Cmd("git checkout -f origin/master -- src/version.cc",
+ TEST_CONFIG["BRANCHNAME"]), ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=self.WriteFakeVersionFile),
- Cmd("git checkout -f hash2 -- src/version.cc", "",
- cb=self.WriteFakeVersionFile),
- ]
- if manual:
- expectations.append(RL("")) # Increment build number.
- expectations += [
+ Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+ Cmd("git log -1 --format=%s release_hash",
+ "Version 3.22.4 (based on abc3)\n"),
Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
@@ -808,28 +823,35 @@ Performance and stability improvements on all platforms.""", commit)
Cmd("git checkout -f origin/master", ""),
Cmd("git diff origin/candidates push_hash", "patch content\n"),
Cmd(("git new-branch %s --upstream origin/candidates" %
- TEST_CONFIG["TRUNKBRANCH"]), "", cb=ResetToTrunk),
+ TEST_CONFIG["CANDIDATESBRANCH"]), "", cb=ResetToCandidates),
Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
Cmd("git checkout -f origin/candidates -- ChangeLog", "",
cb=ResetChangeLog),
- Cmd("git checkout -f origin/candidates -- src/version.cc", "",
+ Cmd("git checkout -f origin/candidates -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
- Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
- cb=CheckSVNCommit),
+ Cmd("git commit -am \"%s\"" % commit_msg_squashed, ""),
]
if manual:
expectations.append(RL("Y")) # Sanity check.
expectations += [
Cmd("git cl land -f --bypass-hooks", ""),
+ Cmd("git checkout -f master", ""),
+ Cmd("git fetch", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["CANDIDATESBRANCH"], ""),
+ Cmd(("git new-branch %s --upstream origin/candidates" %
+ TEST_CONFIG["CANDIDATESBRANCH"]), "", cb=ResetToCandidates),
+ Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
+ cb=CheckVersionCommit),
+ Cmd("git cl land -f --bypass-hooks", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
"\"Version 3.22.5 (based on push_hash)\""
" origin/candidates", "hsh_to_tag"),
Cmd("git tag 3.22.5 hsh_to_tag", ""),
Cmd("git push origin 3.22.5", ""),
- Cmd("git checkout -f some_branch", ""),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
- Cmd("git branch -D %s" % TEST_CONFIG["TRUNKBRANCH"], ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["CANDIDATESBRANCH"], ""),
]
self.Expect(expectations)
@@ -837,7 +859,7 @@ Performance and stability improvements on all platforms.""", commit)
if force: args.append("-f")
if manual: args.append("-m")
else: args += ["-r", "reviewer@chromium.org"]
- PushToTrunk(TEST_CONFIG, self).Run(args)
+ PushToCandidates(TEST_CONFIG, self).Run(args)
cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
@@ -845,17 +867,123 @@ Performance and stability improvements on all platforms.""", commit)
self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
# Note: The version file is on build number 5 again in the end of this test
- # since the git command that merges to the bleeding edge branch is mocked
- # out.
+ # since the git command that merges to master is mocked out.
+
+ def testPushToCandidatesManual(self):
+ self._PushToCandidates(manual=True)
+
+ def testPushToCandidatesSemiAutomatic(self):
+ self._PushToCandidates()
+
+ def testPushToCandidatesForced(self):
+ self._PushToCandidates(force=True)
+
+ def testCreateRelease(self):
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+
+ # The version file on master has build level 5.
+ self.WriteFakeVersionFile(build=5)
+
+ master_change_log = "2014-03-17: Sentinel\n"
+ TextToFile(master_change_log,
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
+
+ commit_msg = """Version 3.22.5
+
+Log text 1 (issue 321).
- def testPushToTrunkManual(self):
- self._PushToTrunk(manual=True)
+Performance and stability improvements on all platforms."""
+
+ def ResetChangeLog():
+ last_change_log = """1999-04-05: Version 3.22.4
- def testPushToTrunkSemiAutomatic(self):
- self._PushToTrunk()
+ Performance and stability improvements on all platforms.\n"""
+ TextToFile(last_change_log,
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
- def testPushToTrunkForced(self):
- self._PushToTrunk(force=True)
+
+ def CheckVersionCommit():
+ commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
+ self.assertEquals(commit_msg, commit)
+ version = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
+ self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
+ self.assertFalse(re.search(r"#define V8_BUILD_NUMBER\s+6", version))
+ self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+0", version))
+ self.assertTrue(
+ re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
+
+ # Check that the change log on the candidates branch got correctly
+ # modified.
+ change_log = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
+ self.assertEquals(
+"""1999-07-31: Version 3.22.5
+
+ Log text 1 (issue 321).
+
+ Performance and stability improvements on all platforms.
+
+
+1999-04-05: Version 3.22.4
+
+ Performance and stability improvements on all platforms.\n""",
+ change_log)
+
+ expectations = [
+ Cmd("git fetch origin "
+ "+refs/heads/*:refs/heads/* "
+ "+refs/pending/*:refs/pending/* "
+ "+refs/pending-tags/*:refs/pending-tags/*", ""),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git branch", ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git checkout -f origin/master -- include/v8-version.h",
+ "", cb=self.WriteFakeVersionFile),
+ Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+ Cmd("git log -1 --format=%s release_hash", "Version 3.22.4\n"),
+ Cmd("git log -1 --format=%H release_hash^", "abc3\n"),
+ Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
+ Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
+ Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
+ Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
+ Cmd("git reset --hard origin/master", ""),
+ Cmd("git checkout -b work-branch push_hash", ""),
+ Cmd("git checkout -f 3.22.4 -- ChangeLog", "", cb=ResetChangeLog),
+ Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
+ cb=self.WriteFakeVersionFile),
+ Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
+ cb=CheckVersionCommit),
+ Cmd("git push origin "
+ "refs/heads/work-branch:refs/pending/heads/3.22.5 "
+ "push_hash:refs/pending-tags/heads/3.22.5 "
+ "push_hash:refs/heads/3.22.5", ""),
+ Cmd("git fetch", ""),
+ Cmd("git log -1 --format=%H --grep="
+ "\"Version 3.22.5\" origin/3.22.5", "hsh_to_tag"),
+ Cmd("git tag 3.22.5 hsh_to_tag", ""),
+ Cmd("git push origin 3.22.5", ""),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git branch", "* master\n work-branch\n"),
+ Cmd("git branch -D work-branch", ""),
+ Cmd("git gc", ""),
+ ]
+ self.Expect(expectations)
+
+ args = ["-a", "author@chromium.org",
+ "-r", "reviewer@chromium.org",
+ "--revision", "push_hash"]
+ CreateRelease(TEST_CONFIG, self).Run(args)
+
+ cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
+ self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
+ self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
+ self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
+
+ # Note: The version file is on build number 5 again at the end of this test
+ # since the git command that merges to master is mocked out.
C_V8_22624_LOG = """V8 CL.
@@ -875,93 +1003,140 @@ git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
"""
- def testChromiumRoll(self):
- googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG["PERSISTFILE_BASENAME"]
- with open(googlers_mapping_py, "w") as f:
- f.write("""
-def list_to_dict(entries):
- return {"g_name@google.com": "c_name@chromium.org"}
-def get_list():
- pass""")
+ ROLL_COMMIT_MSG = """Update V8 to version 3.22.4.
+
+Summary of changes available at:
+https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
+
+Please follow these instructions for assigning/CC'ing issues:
+https://github.com/v8/v8/wiki/Triaging%20issues
+
+Please close rolling in case of a roll revert:
+https://v8-roll.appspot.com/
+This only works with a Google account.
+
+TBR=reviewer@chromium.org"""
+
+ # Snippet from the original DEPS file.
+ FAKE_DEPS = """
+vars = {
+ "v8_revision": "last_roll_hsh",
+}
+deps = {
+ "src/v8":
+ (Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
+ Var("v8_revision"),
+}
+"""
+
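+ # The roll rewrites "v8_revision" in this DEPS snippet. Conceptually
+ # (hypothetical helper; in the test the real update is done by the mocked
+ # roll-dep-svn command below):
+ #
+ #   def update_v8_revision(deps_text, new_rev):
+ #     return re.sub(r'("v8_revision": ")[^"]+(")',
+ #                   r'\g<1>%s\g<2>' % new_rev, deps_text)
+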
+ def testChromiumRollUpToDate(self):
+ TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
+ TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+ self.Expect([
+ Cmd("git fetch origin", ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git describe --tags last_roll_hsh", "3.22.4"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nroll_hsh\nhash_123"),
+ Cmd("git describe --tags bad_tag", ""),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ ])
+
+ result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
+ AUTO_PUSH_ARGS + [
+ "-c", TEST_CONFIG["CHROMIUM"],
+ "--json-output", json_output_file])
+ self.assertEquals(0, result)
+ json_output = json.loads(FileToText(json_output_file))
+ self.assertEquals("up_to_date", json_output["monitoring_state"])
+
+ def testChromiumRoll(self):
# Setup fake directory structures.
TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
+ TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
chrome_dir = TEST_CONFIG["CHROMIUM"]
os.makedirs(os.path.join(chrome_dir, "v8"))
- # Write fake deps file.
- TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
- os.path.join(chrome_dir, "DEPS"))
def WriteDeps():
TextToFile("Some line\n \"v8_revision\": \"22624\",\n some line",
os.path.join(chrome_dir, "DEPS"))
expectations = [
Cmd("git fetch origin", ""),
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "origin/candidates"), "push_hash\n"),
- Cmd("git log -1 --format=%s push_hash",
- "Version 3.22.5 (based on bleeding_edge revision r22622)\n"),
- URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
- "document.write('g_name')"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git describe --tags last_roll_hsh", "3.22.3.1"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nroll_hsh\nhash_123"),
+ Cmd("git describe --tags bad_tag", ""),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git log -1 --format=%s roll_hsh", "Version 3.22.4\n"),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
Cmd("git status -s -uno", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
+ Cmd("git branch", "", cwd=chrome_dir),
Cmd("gclient sync --nohooks", "syncing...", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
Cmd("git fetch origin", ""),
- Cmd("git new-branch v8-roll-push_hash", "", cwd=chrome_dir),
- Cmd("roll-dep v8 push_hash", "rolled", cb=WriteDeps, cwd=chrome_dir),
- Cmd(("git commit -am \"Update V8 to version 3.22.5 "
- "(based on bleeding_edge revision r22622).\n\n"
- "Please reply to the V8 sheriff c_name@chromium.org in "
- "case of problems.\n\nTBR=c_name@chromium.org\" "
- "--author \"author@chromium.org <author@chromium.org>\""),
+ Cmd("git new-branch work-branch", "", cwd=chrome_dir),
+ Cmd("roll-dep-svn v8 roll_hsh", "rolled", cb=WriteDeps, cwd=chrome_dir),
+ Cmd(("git commit -am \"%s\" "
+ "--author \"author@chromium.org <author@chromium.org>\"" %
+ self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
- Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f", "",
- cwd=chrome_dir),
+ Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
+ "--use-commit-queue", "", cwd=chrome_dir),
+ Cmd("git checkout -f master", "", cwd=chrome_dir),
+ Cmd("git branch -D work-branch", "", cwd=chrome_dir),
]
self.Expect(expectations)
args = ["-a", "author@chromium.org", "-c", chrome_dir,
- "--sheriff", "--googlers-mapping", googlers_mapping_py,
- "-r", "reviewer@chromium.org"]
- ChromiumRoll(TEST_CONFIG, self).Run(args)
+ "-r", "reviewer@chromium.org", "--json-output", json_output_file]
+ auto_roll.AutoRoll(TEST_CONFIG, self).Run(args)
deps = FileToText(os.path.join(chrome_dir, "DEPS"))
self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))
+ json_output = json.loads(FileToText(json_output_file))
+ self.assertEquals("success", json_output["monitoring_state"])
+
def testCheckLastPushRecently(self):
self.Expect([
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
- "origin/candidates"), "hash2\n"),
- Cmd("git log -1 --format=%s hash2",
- "Version 3.4.5 (based on abc123)\n"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+ Cmd("git log -1 --format=%s release_hash",
+ "Version 3.22.4 (based on abc3)\n"),
+ Cmd("git log --format=%H abc3..abc123", "\n"),
])
self._state["candidate"] = "abc123"
self.assertEquals(0, self.RunStep(
- auto_push.AutoPush, CheckLastPush, AUTO_PUSH_ARGS))
+ auto_push.AutoPush, LastReleaseBailout, AUTO_PUSH_ARGS))
def testAutoPush(self):
- TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
-
self.Expect([
- Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
Cmd("git fetch", ""),
- URL("https://v8-status.appspot.com/current?format=json",
- "{\"message\": \"Tree is throttled\"}"),
- Cmd("git fetch origin +refs/heads/candidate:refs/heads/candidate", ""),
- Cmd("git show-ref -s refs/heads/candidate", "abc123\n"),
- Cmd(("git log -1 --format=%H --grep=\""
- "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\""
- " origin/candidates"), "push_hash\n"),
- Cmd("git log -1 --format=%s push_hash",
- "Version 3.4.5 (based on abc101)\n"),
+ Cmd("git fetch origin +refs/heads/lkgr:refs/heads/lkgr", ""),
+ Cmd("git show-ref -s refs/heads/lkgr", "abc123\n"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git tag", self.TAGS),
+ Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+ Cmd("git log -1 --format=%s release_hash",
+ "Version 3.22.4 (based on abc3)\n"),
+ Cmd("git log --format=%H abc3..abc123", "some_stuff\n"),
])
auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
@@ -971,103 +1146,6 @@ def get_list():
self.assertEquals("abc123", state["candidate"])
- def testAutoPushStoppedBySettings(self):
- TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- TEST_CONFIG["SETTINGS_LOCATION"] = self.MakeEmptyTempFile()
- TextToFile("{\"enable_auto_push\": false}",
- TEST_CONFIG["SETTINGS_LOCATION"])
-
- self.Expect([
- Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
- Cmd("git fetch", ""),
- ])
-
- def RunAutoPush():
- auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
- self.assertRaises(Exception, RunAutoPush)
-
- def testAutoPushStoppedByTreeStatus(self):
- TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
-
- self.Expect([
- Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
- Cmd("git fetch", ""),
- URL("https://v8-status.appspot.com/current?format=json",
- "{\"message\": \"Tree is throttled (no push)\"}"),
- ])
-
- def RunAutoPush():
- auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
- self.assertRaises(Exception, RunAutoPush)
-
- def testAutoRollExistingRoll(self):
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"},"
- "{\"subject\": \"Update V8 to Version...\"}]}")),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
- self.assertEquals(0, result)
-
- # Snippet from the original DEPS file.
- FAKE_DEPS = """
-vars = {
- "v8_revision": "abcd123455",
-}
-deps = {
- "src/v8":
- (Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
- Var("v8_revision"),
-}
-"""
-
- def testAutoRollUpToDate(self):
- TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
- TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"}]}")),
- Cmd("git fetch", ""),
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "origin/candidates"), "push_hash\n"),
- Cmd("git log -1 --format=%B push_hash", self.C_V8_22624_LOG),
- Cmd("git log -1 --format=%B abcd123455", self.C_V8_123455_LOG),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
- self.assertEquals(0, result)
-
- def testAutoRoll(self):
- TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
- TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
- TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"] = self.MakeEmptyTempFile()
- TextToFile("fake key", TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"])
-
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"}]}")),
- Cmd("git fetch", ""),
- Cmd(("git log -1 --format=%H --grep="
- "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
- "origin/candidates"), "push_hash\n"),
- Cmd("git log -1 --format=%B push_hash", self.C_V8_123456_LOG),
- Cmd("git log -1 --format=%B abcd123455", self.C_V8_123455_LOG),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"], "--roll"])
- self.assertEquals(0, result)
-
def testMergeToBranch(self):
TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
@@ -1101,22 +1179,23 @@ BUG=123,234,345,456,567,v8:123
LOG=N
"""
- def VerifySVNCommit():
+ def VerifyLand():
commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
self.assertEquals(msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
- self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
- self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
- self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
- self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+ self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
+ self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+1", version))
+ self.assertTrue(
+ re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
- Cmd("git new-branch %s --upstream origin/candidates" %
+ Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
TEST_CONFIG["BRANCHNAME"], ""),
Cmd(("git log --format=%H --grep=\"Port ab12345\" "
"--reverse origin/master"),
@@ -1174,20 +1253,20 @@ LOG=N
RL("LGTM"), # Enter LGTM for V8 CL.
Cmd("git cl presubmit", "Presubmit successfull\n"),
Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
- cb=VerifySVNCommit),
+ cb=VerifyLand),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep=\""
"Version 3.22.5.1 (cherry-pick)"
- "\" origin/candidates",
+ "\" refs/remotes/origin/candidates",
""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep=\""
"Version 3.22.5.1 (cherry-pick)"
- "\" origin/candidates",
+ "\" refs/remotes/origin/candidates",
"hsh_to_tag"),
Cmd("git tag 3.22.5.1 hsh_to_tag", ""),
Cmd("git push origin 3.22.5.1", ""),
- Cmd("git checkout -f some_branch", ""),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
@@ -1205,6 +1284,10 @@ LOG=N
MergeToBranch(TEST_CONFIG, self).Run(args)
def testReleases(self):
+ c_hash1_commit_log = """Update V8 to Version 4.2.71.
+
+Cr-Commit-Position: refs/heads/master@{#5678}
+"""
c_hash2_commit_log = """Revert something.
BUG=12345
@@ -1241,6 +1324,11 @@ git-svn-id: googlecode@123 0039-1c4b
Cr-Commit-Position: refs/heads/candidates@{#345}
"""
+ c_hash_456_commit_log = """Version 4.2.71.
+
+Cr-Commit-Position: refs/heads/4.2.71@{#1}
+"""
+ c_deps = "Line\n \"v8_revision\": \"%s\",\n line\n"
json_output = self.MakeEmptyTempFile()
csv_output = self.MakeEmptyTempFile()
@@ -1250,94 +1338,109 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
chrome_dir = TEST_CONFIG["CHROMIUM"]
chrome_v8_dir = os.path.join(chrome_dir, "v8")
os.makedirs(chrome_v8_dir)
- def WriteDEPS(revision):
- TextToFile("Line\n \"v8_revision\": \"%s\",\n line\n" % revision,
- os.path.join(chrome_dir, "DEPS"))
- WriteDEPS(567)
- def ResetVersion(minor, build, patch=0):
- return lambda: self.WriteFakeVersionFile(minor=minor,
+ def ResetVersion(major, minor, build, patch=0):
+ return lambda: self.WriteFakeVersionFile(major=major,
+ minor=minor,
build=build,
patch=patch)
- def ResetDEPS(revision):
- return lambda: WriteDEPS(revision)
-
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
- Cmd("git branch -r", " branch-heads/3.21\n branch-heads/3.3\n"),
- Cmd("git reset --hard branch-heads/3.3", ""),
- Cmd("git log --format=%H", "hash1\nhash_234"),
- Cmd("git diff --name-only hash1 hash1^", ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nhash_234\nhash_123\nhash_345\nhash_456\n"),
+ Cmd("git describe --tags bad_tag", "3.23.42-1-deadbeef"),
+ Cmd("git describe --tags hash_234", "3.3.1.1"),
+ Cmd("git describe --tags hash_123", "3.21.2"),
+ Cmd("git describe --tags hash_345", "3.22.3"),
+ Cmd("git describe --tags hash_456", "4.2.71"),
Cmd("git diff --name-only hash_234 hash_234^", VERSION_FILE),
Cmd("git checkout -f hash_234 -- %s" % VERSION_FILE, "",
- cb=ResetVersion(3, 1, 1)),
+ cb=ResetVersion(3, 3, 1, 1)),
+ Cmd("git branch -r --contains hash_234", " branch-heads/3.3\n"),
Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
Cmd("git log -1 --format=%s hash_234", ""),
Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
Cmd("git log -1 --format=%ci hash_234", "18:15"),
Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
- cb=ResetVersion(22, 5)),
- Cmd("git reset --hard branch-heads/3.21", ""),
- Cmd("git log --format=%H", "hash_123\nhash4\nhash5\n"),
+ cb=ResetVersion(3, 22, 5)),
Cmd("git diff --name-only hash_123 hash_123^", VERSION_FILE),
Cmd("git checkout -f hash_123 -- %s" % VERSION_FILE, "",
- cb=ResetVersion(21, 2)),
+ cb=ResetVersion(3, 21, 2)),
+ Cmd("git branch -r --contains hash_123", " branch-heads/3.21\n"),
Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
Cmd("git log -1 --format=%s hash_123", ""),
Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
Cmd("git log -1 --format=%ci hash_123", "03:15"),
Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
- cb=ResetVersion(22, 5)),
- Cmd("git reset --hard origin/candidates", ""),
- Cmd("git log --format=%H", "hash_345\n"),
+ cb=ResetVersion(3, 22, 5)),
Cmd("git diff --name-only hash_345 hash_345^", VERSION_FILE),
Cmd("git checkout -f hash_345 -- %s" % VERSION_FILE, "",
- cb=ResetVersion(22, 3)),
+ cb=ResetVersion(3, 22, 3)),
+ Cmd("git branch -r --contains hash_345", " origin/candidates\n"),
Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
Cmd("git log -1 --format=%s hash_345", ""),
Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
Cmd("git log -1 --format=%ci hash_345", ""),
Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
- cb=ResetVersion(22, 5)),
- Cmd("git reset --hard origin/master", ""),
- Cmd("git status -s -uno", "", cwd=chrome_dir),
- Cmd("git checkout -f master", "", cwd=chrome_dir),
- Cmd("git pull", "", cwd=chrome_dir),
- Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], "",
+ cb=ResetVersion(3, 22, 5)),
+ Cmd("git diff --name-only hash_456 hash_456^", VERSION_FILE),
+ Cmd("git checkout -f hash_456 -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(4, 2, 71)),
+ Cmd("git branch -r --contains hash_456", " origin/4.2.71\n"),
+ Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
+ Cmd("git log -1 --format=%H 4.2.71", "hash_456"),
+ Cmd("git log -1 --format=%s hash_456", "Version 4.2.71"),
+ Cmd("git log -1 --format=%H hash_456^", "master_456"),
+ Cmd("git log -1 --format=%B master_456",
+ "Cr-Commit-Position: refs/heads/master@{#456}"),
+ Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
+ Cmd("git log -1 --format=%ci hash_456", "02:15"),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 22, 5)),
+ Cmd("git fetch origin +refs/heads/*:refs/remotes/origin/* "
+ "+refs/branch-heads/*:refs/remotes/branch-heads/*", "",
cwd=chrome_dir),
Cmd("git fetch origin", "", cwd=chrome_v8_dir),
- Cmd("git log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\nc_hash3\n",
+ Cmd("git log --format=%H --grep=\"V8\" origin/master -- DEPS",
+ "c_hash1\nc_hash2\nc_hash3\n",
cwd=chrome_dir),
- Cmd("git diff --name-only c_hash1 c_hash1^", "", cwd=chrome_dir),
- Cmd("git diff --name-only c_hash2 c_hash2^", "DEPS", cwd=chrome_dir),
- Cmd("git checkout -f c_hash2 -- DEPS", "",
- cb=ResetDEPS("0123456789012345678901234567890123456789"),
+ Cmd("git show c_hash1:DEPS", c_deps % "hash_456", cwd=chrome_dir),
+ Cmd("git log -1 --format=%B c_hash1", c_hash1_commit_log,
cwd=chrome_dir),
+ Cmd("git show c_hash2:DEPS", c_deps % "hash_345", cwd=chrome_dir),
Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
cwd=chrome_dir),
- Cmd("git log -1 --format=%B 0123456789012345678901234567890123456789",
- self.C_V8_22624_LOG, cwd=chrome_v8_dir),
- Cmd("git diff --name-only c_hash3 c_hash3^", "DEPS", cwd=chrome_dir),
- Cmd("git checkout -f c_hash3 -- DEPS", "", cb=ResetDEPS(345),
- cwd=chrome_dir),
+ Cmd("git show c_hash3:DEPS", c_deps % "deadbeef", cwd=chrome_dir),
Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log,
cwd=chrome_dir),
- Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
- cwd=chrome_dir),
Cmd("git branch -r", " weird/123\n branch-heads/7\n", cwd=chrome_dir),
- Cmd("git checkout -f branch-heads/7 -- DEPS", "", cb=ResetDEPS(345),
- cwd=chrome_dir),
- Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
+ Cmd("git show refs/branch-heads/7:DEPS", c_deps % "hash_345",
cwd=chrome_dir),
- Cmd("git checkout -f master", "", cwd=chrome_dir),
- Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
- Cmd("git checkout -f some_branch", ""),
- Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+ URL("http://omahaproxy.appspot.com/all.json", """[{
+ "os": "win",
+ "versions": [{
+ "version": "2.2.2.2",
+ "v8_version": "22.2.2.2",
+ "current_reldate": "04/09/15",
+ "os": "win",
+ "channel": "canary",
+ "previous_version": "1.1.1.0"
+ }]
+ }]"""),
+ URL("http://omahaproxy.appspot.com/v8.json?version=1.1.1.0", """{
+ "chromium_version": "1.1.1.0",
+ "v8_version": "11.1.1.0"
+ }"""),
+ Cmd("git rev-list -1 11.1.1", "v8_previous_version_hash"),
+ Cmd("git rev-list -1 22.2.2.2", "v8_version_hash"),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "")
])
args = ["-c", TEST_CONFIG["CHROMIUM"],
@@ -1347,20 +1450,47 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
Releases(TEST_CONFIG, self).Run(args)
# Check expected output.
- csv = ("3.22.3,candidates,345,3456:4566,\r\n"
+ csv = ("4.2.71,4.2.71,1,5678,\r\n"
+ "3.22.3,candidates,345,4567:5677,\r\n"
"3.21.2,3.21,123,,\r\n"
"3.3.1.1,3.3,234,,abc12\r\n")
self.assertEquals(csv, FileToText(csv_output))
- expected_json = [
+ expected_json = {"chrome_releases":{
+ "canaries": [
+ {
+ "chrome_version": "2.2.2.2",
+ "os": "win",
+ "release_date": "04/09/15",
+ "v8_version": "22.2.2.2",
+ "v8_version_hash": "v8_version_hash",
+ "v8_previous_version": "11.1.1.0",
+ "v8_previous_version_hash": "v8_previous_version_hash"
+ }]},
+ "releases":[
+ {
+ "revision": "1",
+ "revision_git": "hash_456",
+ "master_position": "456",
+ "master_hash": "master_456",
+ "patches_merged": "",
+ "version": "4.2.71",
+ "chromium_revision": "5678",
+ "branch": "4.2.71",
+ "review_link": "",
+ "date": "02:15",
+ "chromium_branch": "",
+ # FIXME(machenbach): Fix revisions link for git.
+ "revision_link": "https://code.google.com/p/v8/source/detail?r=1",
+ },
{
"revision": "345",
"revision_git": "hash_345",
- "bleeding_edge": "",
- "bleeding_edge_git": "",
+ "master_position": "",
+ "master_hash": "",
"patches_merged": "",
"version": "3.22.3",
- "chromium_revision": "3456:4566",
+ "chromium_revision": "4567:5677",
"branch": "candidates",
"review_link": "",
"date": "",
@@ -1371,8 +1501,8 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
"revision": "123",
"revision_git": "hash_123",
"patches_merged": "",
- "bleeding_edge": "",
- "bleeding_edge_git": "",
+ "master_position": "",
+ "master_hash": "",
"version": "3.21.2",
"chromium_revision": "",
"branch": "3.21",
@@ -1385,8 +1515,8 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
"revision": "234",
"revision_git": "hash_234",
"patches_merged": "abc12",
- "bleeding_edge": "",
- "bleeding_edge_git": "",
+ "master_position": "",
+ "master_hash": "",
"version": "3.3.1.1",
"chromium_revision": "",
"branch": "3.3",
@@ -1394,100 +1524,11 @@ Cr-Commit-Position: refs/heads/candidates@{#345}
"date": "18:15",
"chromium_branch": "",
"revision_link": "https://code.google.com/p/v8/source/detail?r=234",
- },
- ]
+ },],
+ }
self.assertEquals(expected_json, json.loads(FileToText(json_output)))
- def _bumpUpVersion(self):
- self.WriteFakeVersionFile()
-
- def ResetVersion(minor, build, patch=0):
- return lambda: self.WriteFakeVersionFile(minor=minor,
- build=build,
- patch=patch)
-
- return [
- Cmd("git status -s -uno", ""),
- Cmd("git checkout -f master", "", cb=ResetVersion(11, 4)),
- Cmd("git pull", ""),
- Cmd("git branch", ""),
- Cmd("git checkout -f master", ""),
- Cmd("git log -1 --format=%H", "latest_hash"),
- Cmd("git diff --name-only latest_hash latest_hash^", ""),
- URL("https://v8-status.appspot.com/lkgr", "12345"),
- Cmd("git checkout -f master", ""),
- Cmd(("git log --format=%H --grep="
- "\"^git-svn-id: [^@]*@12345 [A-Za-z0-9-]*$\""),
- "lkgr_hash"),
- Cmd("git new-branch auto-bump-up-version --upstream lkgr_hash", ""),
- Cmd("git checkout -f master", ""),
- Cmd("git branch", "auto-bump-up-version\n* master"),
- Cmd("git branch -D auto-bump-up-version", ""),
- Cmd("git diff --name-only lkgr_hash lkgr_hash^", ""),
- Cmd("git checkout -f candidates", "", cb=ResetVersion(11, 5)),
- Cmd("git pull", ""),
- URL("https://v8-status.appspot.com/current?format=json",
- "{\"message\": \"Tree is open\"}"),
- Cmd("git new-branch auto-bump-up-version --upstream master", "",
- cb=ResetVersion(11, 4)),
- Cmd("git commit -am \"[Auto-roll] Bump up version to 3.11.6.0\n\n"
- "TBR=author@chromium.org\" "
- "--author \"author@chromium.org <author@chromium.org>\"", ""),
- ]
-
- def testBumpUpVersionGit(self):
- expectations = self._bumpUpVersion()
- expectations += [
- Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
- "--bypass-hooks", ""),
- Cmd("git cl land -f --bypass-hooks", ""),
- Cmd("git checkout -f master", ""),
- Cmd("git branch", "auto-bump-up-version\n* master"),
- Cmd("git branch -D auto-bump-up-version", ""),
- ]
- self.Expect(expectations)
-
- BumpUpVersion(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
-
-
- # Test that we bail out if the last change was a version change.
- def testBumpUpVersionBailout1(self):
- self._state["latest"] = "latest_hash"
-
- self.Expect([
- Cmd("git diff --name-only latest_hash latest_hash^", VERSION_FILE),
- ])
-
- self.assertEquals(0,
- self.RunStep(BumpUpVersion, LastChangeBailout, ["--dry_run"]))
-
- # Test that we bail out if the lkgr was a version change.
- def testBumpUpVersionBailout2(self):
- self._state["lkgr"] = "lkgr_hash"
-
- self.Expect([
- Cmd("git diff --name-only lkgr_hash lkgr_hash^", VERSION_FILE),
- ])
-
- self.assertEquals(0,
- self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
-
- # Test that we bail out if the last version is already newer than the lkgr's
- # version.
- def testBumpUpVersionBailout3(self):
- self._state["lkgr"] = "lkgr_hash"
- self._state["lkgr_version"] = "3.22.4.0"
- self._state["latest_version"] = "3.22.5.0"
-
- self.Expect([
- Cmd("git diff --name-only lkgr_hash lkgr_hash^", ""),
- ])
-
- self.assertEquals(0,
- self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
-
-
class SystemTest(unittest.TestCase):
def testReload(self):
options = ScriptsBase(
diff --git a/tools/release/test_search_related_commits.py b/tools/release/test_search_related_commits.py
new file mode 100755
index 00000000..cf612361
--- /dev/null
+++ b/tools/release/test_search_related_commits.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+from os import path
+import search_related_commits
+import shutil
+from subprocess import Popen, PIPE, check_call
+import unittest
+
+
+TEST_CONFIG = {
+ "GIT_REPO": "/tmp/test-v8-search-related-commits",
+}
+
+class TestSearchRelatedCommits(unittest.TestCase):
+
+ base_dir = TEST_CONFIG["GIT_REPO"]
+
+ def _execute_git(self, git_args):
+ full_command = ["git", "-C", self.base_dir] + git_args
+ p = Popen(args=full_command, stdin=PIPE,
+ stdout=PIPE, stderr=PIPE)
+ output, err = p.communicate()
+ rc = p.returncode
+ if rc != 0:
+ raise Exception(err)
+ return output
+
+ def setUp(self):
+ if path.exists(self.base_dir):
+ shutil.rmtree(self.base_dir)
+
+ check_call(["git", "init", self.base_dir])
+
+ # Initial commit
+ message = """[turbofan] Sanitize language mode for javascript operators.
+
+ R=mstarzinger@chromium.org
+
+ Review URL: https://codereview.chromium.org/1084243005
+
+ Cr-Commit-Position: refs/heads/master@{#28059}"""
+ self._make_empty_commit(message)
+
+ message = """[crankshaft] Do some stuff
+
+ R=hablich@chromium.org
+
+ Review URL: https://codereview.chromium.org/1084243007
+
+ Cr-Commit-Position: refs/heads/master@{#28030}"""
+
+ self._make_empty_commit(message)
+
+ def tearDown(self):
+ if path.exists(self.base_dir):
+ shutil.rmtree(self.base_dir)
+
+ def _assert_correct_standard_result(
+ self, result, all_commits, hash_of_first_commit):
+ self.assertEqual(len(result), 1, "Master commit not found")
+ self.assertTrue(
+ result.get(hash_of_first_commit),
+ "Master commit is wrong")
+
+ self.assertEqual(
+ len(result[hash_of_first_commit]),
+ 1,
+ "Child commit not found")
+ self.assertEqual(
+ all_commits[2],
+ result[hash_of_first_commit][0],
+ "Child commit wrong")
+
+ def _get_commits(self):
+ commits = self._execute_git(
+ ["log", "--format=%H", "--reverse"]).splitlines()
+ return commits
+
+ def _make_empty_commit(self, message):
+ self._execute_git(["commit", "--allow-empty", "-m", message])
+
+ def testSearchByCommitPosition(self):
+ message = """Revert of some stuff.
+ > Cr-Commit-Position: refs/heads/master@{#28059}
+ R=mstarzinger@chromium.org
+
+ Review URL: https://codereview.chromium.org/1084243005
+
+ Cr-Commit-Position: refs/heads/master@{#28088}"""
+
+ self._make_empty_commit(message)
+
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = search_related_commits.search_all_related_commits(
+ self.base_dir, hash_of_first_commit, "HEAD", None)
+
+ self._assert_correct_standard_result(result, commits, hash_of_first_commit)
+
+ def testSearchByTitle(self):
+ message = """Revert of some stuff.
+ > [turbofan] Sanitize language mode for javascript operators.
+ > Cr-Commit-Position: refs/heads/master@{#289}
+ R=mstarzinger@chromium.org
+
+ Review URL: https://codereview.chromium.org/1084243005
+
+ Cr-Commit-Position: refs/heads/master@{#28088}"""
+
+ self._make_empty_commit(message)
+
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = search_related_commits.search_all_related_commits(
+ self.base_dir, hash_of_first_commit, "HEAD", None)
+
+ self._assert_correct_standard_result(result, commits, hash_of_first_commit)
+
+ def testSearchByHash(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ message = """Revert of some stuff.
+ > [turbofan] Sanitize language mode for javascript operators.
+ > Reverting """ + hash_of_first_commit + """
+ > R=mstarzinger@chromium.org
+
+ Review URL: https://codereview.chromium.org/1084243005
+
+ Cr-Commit-Position: refs/heads/master@{#28088}"""
+
+ self._make_empty_commit(message)
+
+ # Fetch again to pick up the new commit.
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = search_related_commits.search_all_related_commits(
+ self.base_dir,
+ hash_of_first_commit,
+ "HEAD",
+ None)
+
+ self._assert_correct_standard_result(result, commits, hash_of_first_commit)
+
+ def testConsiderSeparator(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ # Related commits happen before the separator, so it is not a hit
+ message = """Revert of some stuff: Not a hit
+ > [turbofan] Sanitize language mode for javascript operators.
+ > Reverting """ + hash_of_first_commit + """
+ > R=mstarzinger@chromium.org
+
+ Review URL: https://codereview.chromium.org/1084243005
+
+ Cr-Commit-Position: refs/heads/master@{#28088}"""
+ self._make_empty_commit(message)
+
+ # Related commits happen before and after the separator, so it is a hit
+ commit_pos_of_master = "27088"
+ message = """Implement awesome feature: Master commit
+
+ Review URL: https://codereview.chromium.org/1084243235
+
+ Cr-Commit-Position: refs/heads/master@{#""" + commit_pos_of_master + "}"
+ self._make_empty_commit(message)
+
+ # Separator commit
+ message = """Commit which is the origin of the branch
+
+ Review URL: https://codereview.chromium.org/1084243456
+
+ Cr-Commit-Position: refs/heads/master@{#28173}"""
+ self._make_empty_commit(message)
+
+ # Filler commit
+ message = "Some unrelated commit: Not a hit"
+ self._make_empty_commit(message)
+
+ # Related commit after separator: a hit
+ message = "Patch r" + commit_pos_of_master +""" done
+
+ Review URL: https://codereview.chromium.org/1084243235
+
+ Cr-Commit-Position: refs/heads/master@{#29567}"""
+ self._make_empty_commit(message)
+
+ # Fetch the commits again to include the new ones.
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+ hash_of_hit = commits[3]
+ hash_of_separator = commits[4]
+ hash_of_child_hit = commits[6]
+
+ result = search_related_commits.search_all_related_commits(
+ self.base_dir,
+ hash_of_first_commit,
+ "HEAD",
+ hash_of_separator)
+
+ self.assertTrue(result.get(hash_of_hit), "Hit not found")
+ self.assertEqual(len(result), 1, "More than one hit found")
+ self.assertEqual(
+ len(result.get(hash_of_hit)),
+ 1,
+ "More than one child hit found")
+ self.assertEqual(
+ result.get(hash_of_hit)[0],
+ hash_of_child_hit,
+ "Wrong commit found")
+
+ def testPrettyPrint(self):
+ message = """Revert of some stuff.
+ > [turbofan] Sanitize language mode for javascript operators.
+ > Cr-Commit-Position: refs/heads/master@{#289}
+ R=mstarzinger@chromium.org
+
+ Review URL: https://codereview.chromium.org/1084243005
+
+ Cr-Commit-Position: refs/heads/master@{#28088}"""
+
+ self._make_empty_commit(message)
+
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+ OptionsStruct = namedtuple(
+ "OptionsStruct",
+ "git_dir of until all prettyprint separator verbose")
+ options = OptionsStruct(
+ git_dir=self.base_dir,
+ of=[hash_of_first_commit],
+ until=[commits[2]],
+ all=True,
+ prettyprint=True,
+ separator=None,
+ verbose=False)
+ output = []
+ for current_line in search_related_commits.main(options):
+ output.append(current_line)
+
+ self.assertEqual(len(output), 2, "Not exactly two entries written")
+ self.assertTrue(output[0].startswith("+"), "Master entry not marked with +")
+ self.assertTrue(output[1].startswith("| "), "Child entry not marked with |")
+
+ def testNothingFound(self):
+ commits = self._get_commits()
+
+ self._execute_git(["commit", "--allow-empty", "-m", "A"])
+ self._execute_git(["commit", "--allow-empty", "-m", "B"])
+ self._execute_git(["commit", "--allow-empty", "-m", "C"])
+ self._execute_git(["commit", "--allow-empty", "-m", "D"])
+
+ hash_of_first_commit = commits[0]
+ result = search_related_commits.search_all_related_commits(
+ self.base_dir,
+ hash_of_first_commit,
+ "HEAD",
+ None)
+
+ self.assertEqual(len(result), 0, "Results found where none should be.")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tools/run-deopt-fuzzer.py b/tools/run-deopt-fuzzer.py
index a6fdf318..70e106ec 100755
--- a/tools/run-deopt-fuzzer.py
+++ b/tools/run-deopt-fuzzer.py
@@ -66,6 +66,8 @@ SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
"arm",
"ia32",
+ "ppc",
+ "ppc64",
"mipsel",
"nacl_ia32",
"nacl_x64",
@@ -314,6 +316,7 @@ def Main():
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
if options.download_data:
@@ -377,7 +380,9 @@ def Execute(arch, mode, args, options, suites, workspace):
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
- False) # No predictable mode.
+ False, # No predictable mode.
+ False, # No no_harness mode.
+ False) # Don't use perf data.
# Find available test suites and read test cases from them.
variables = {
@@ -385,6 +390,8 @@ def Execute(arch, mode, args, options, suites, workspace):
"asan": options.asan,
"deopt_fuzzer": True,
"gc_stress": False,
+ "gcov_coverage": False,
+ "ignition": False,
"isolates": options.isolates,
"mode": mode,
"no_i18n": False,
@@ -394,6 +401,9 @@ def Execute(arch, mode, args, options, suites, workspace):
"tsan": False,
"msan": False,
"dcheck_always_on": options.dcheck_always_on,
+ "novfp3": False,
+ "predictable": False,
+ "byteorder": sys.byteorder,
}
all_tests = []
num_tests = 0
@@ -412,7 +422,7 @@ def Execute(arch, mode, args, options, suites, workspace):
test_backup[s] = s.tests
analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
"--print-deopt-stress"]
- s.tests = [ t.CopyAddingFlags(analysis_flags) for t in s.tests ]
+ s.tests = [ t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests ]
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
@@ -459,7 +469,7 @@ def Execute(arch, mode, args, options, suites, workspace):
print "%s %s" % (t.path, distribution)
for i in distribution:
fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
- s.tests.append(t.CopyAddingFlags(fuzzing_flags))
+ s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
diff --git a/tools/run-tests.py b/tools/run-tests.py
index d68d1f86..fe8091ef 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -44,36 +44,48 @@ import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
-from testrunner.local.testsuite import VARIANT_FLAGS
+from testrunner.local.testsuite import ALL_VARIANTS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context
+# Base dir of the v8 checkout to be used as cwd.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
ARCH_GUESS = utils.DefaultArch()
-DEFAULT_TESTS = [
- "mjsunit",
- "unittests",
- "cctest",
- "message",
- "preparser",
-]
# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
 # invoked in separate steps on the bots.
TEST_MAP = {
+ "bot_default": [
+ "mjsunit",
+ "cctest",
+ "webkit",
+ "message",
+ "preparser",
+ "intl",
+ "unittests",
+ ],
"default": [
"mjsunit",
"cctest",
"message",
"preparser",
+ "intl",
+ "unittests",
+ ],
+ "ignition": [
+ "mjsunit",
+ "cctest",
],
"optimize_for_size": [
"mjsunit",
"cctest",
"webkit",
+ "intl",
],
"unittests": [
"unittests",
@@ -81,17 +93,61 @@ TEST_MAP = {
}
TIMEOUT_DEFAULT = 60
-TIMEOUT_SCALEFACTOR = {"debug" : 4,
- "release" : 1 }
-VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
+VARIANTS = ["default", "stress", "turbofan"]
-MODE_FLAGS = {
- "debug" : ["--nohard-abort", "--nodead-code-elimination",
- "--nofold-constants", "--enable-slow-asserts",
- "--debug-code", "--verify-heap"],
- "release" : ["--nohard-abort", "--nodead-code-elimination",
- "--nofold-constants"]}
+EXHAUSTIVE_VARIANTS = VARIANTS + [
+ "nocrankshaft",
+ "turbofan_opt",
+]
+
+DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
+ "--nofold-constants", "--enable-slow-asserts",
+ "--debug-code", "--verify-heap"]
+RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
+ "--nofold-constants"]
+
+MODES = {
+ "debug": {
+ "flags": DEBUG_FLAGS,
+ "timeout_scalefactor": 4,
+ "status_mode": "debug",
+ "execution_mode": "debug",
+ "output_folder": "debug",
+ },
+ "optdebug": {
+ "flags": DEBUG_FLAGS,
+ "timeout_scalefactor": 4,
+ "status_mode": "debug",
+ "execution_mode": "debug",
+ "output_folder": "optdebug",
+ },
+ "release": {
+ "flags": RELEASE_FLAGS,
+ "timeout_scalefactor": 1,
+ "status_mode": "release",
+ "execution_mode": "release",
+ "output_folder": "release",
+ },
+ # Normal trybot release configuration. There, dchecks are always on, which
+ # implies debug is set. Hence, the status file needs to assume debug-like
+ # behavior/timeouts.
+ "tryrelease": {
+ "flags": RELEASE_FLAGS,
+ "timeout_scalefactor": 1,
+ "status_mode": "debug",
+ "execution_mode": "release",
+ "output_folder": "release",
+ },
+ # This mode requires v8 to be compiled with dchecks and slow dchecks.
+ "slowrelease": {
+ "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
+ "timeout_scalefactor": 2,
+ "status_mode": "debug",
+ "execution_mode": "release",
+ "output_folder": "release",
+ },
+}
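The MODES table above centralizes the per-mode settings previously split across TIMEOUT_SCALEFACTOR and MODE_FLAGS; the runner resolves them with plain dict lookups, e.g. (a hedged sketch, values in comments are illustrative):

    # Illustrative lookups against the MODES table above.
    mode = "tryrelease"
    timeout = 60
    mode_flags = MODES[mode]["flags"]              # RELEASE_FLAGS
    timeout *= MODES[mode]["timeout_scalefactor"]  # 60 * 1
    status_mode = MODES[mode]["status_mode"]       # "debug" (dcheck timeouts)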
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-queue-length=64",
@@ -101,14 +157,18 @@ GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
SUPPORTED_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
+ "android_x64",
"arm",
"ia32",
"x87",
"mips",
"mipsel",
+ "mips64",
"mips64el",
"nacl_ia32",
"nacl_x64",
+ "ppc",
+ "ppc64",
"x64",
"x32",
"arm64"]
@@ -116,9 +176,11 @@ SUPPORTED_ARCHS = ["android_arm",
SLOW_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
+ "android_x64",
"arm",
"mips",
"mipsel",
+ "mips64",
"mips64el",
"nacl_ia32",
"nacl_x64",
@@ -128,9 +190,11 @@ SLOW_ARCHS = ["android_arm",
def BuildOptions():
result = optparse.OptionParser()
+ result.usage = '%prog [options] [tests]'
+ result.description = """TESTS: %s""" % (TEST_MAP["default"])
result.add_option("--arch",
help=("The architecture to run tests for, "
- "'auto' or 'native' for auto-detect"),
+ "'auto' or 'native' for auto-detect: %s" % SUPPORTED_ARCHS),
default="ia32,x64,arm")
result.add_option("--arch-and-mode",
help="Architecture and mode in the format 'arch.mode'",
@@ -138,12 +202,18 @@ def BuildOptions():
result.add_option("--asan",
help="Regard test expectations for ASAN",
default=False, action="store_true")
+ result.add_option("--cfi-vptr",
+ help="Run tests with UBSAN cfi_vptr option.",
+ default=False, action="store_true")
result.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
result.add_option("--dcheck-always-on",
help="Indicates that V8 was compiled with DCHECKs enabled",
default=False, action="store_true")
+ result.add_option("--novfp3",
+ help="Indicates that V8 was compiled without VFP3 support",
+ default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--flaky-tests",
@@ -158,21 +228,33 @@ def BuildOptions():
result.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
+ result.add_option("--gcov-coverage",
+ help="Uses executables instrumented for gcov coverage",
+ default=False, action="store_true")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
result.add_option("--download-data", help="Download missing test suite data",
default=False, action="store_true")
+ result.add_option("--download-data-only",
+ help="Download missing test suite data and exit",
+ default=False, action="store_true")
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
+ result.add_option("--ignition", help="Skip tests which don't run in ignition",
+ default=False, action="store_true")
result.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
result.add_option("-m", "--mode",
- help="The test modes in which to run (comma-separated)",
+ help="The test modes in which to run (comma-separated,"
+ " uppercase for ninja and buildbot builds): %s" % MODES.keys(),
default="release,debug")
+ result.add_option("--no-harness", "--noharness",
+ help="Run without test harness of a given suite",
+ default=False, action="store_true")
result.add_option("--no-i18n", "--noi18n",
help="Skip internationalization tests",
default=False, action="store_true")
@@ -196,7 +278,10 @@ def BuildOptions():
help="Don't run any testing variants",
default=False, dest="no_variants", action="store_true")
result.add_option("--variants",
- help="Comma-separated list of testing variants")
+ help="Comma-separated list of testing variants: %s" % VARIANTS)
+ result.add_option("--exhaustive-variants",
+ default=False, action="store_true",
+ help="Use exhaustive set of default variants.")
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("--predictable",
@@ -235,6 +320,9 @@ def BuildOptions():
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
+ result.add_option("--swarming",
+ help="Indicates running test driver on swarming.",
+ default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -252,16 +340,71 @@ def BuildOptions():
result.add_option("--junittestsuite",
help="The testsuite name in the JUnit output file",
default="v8tests")
- result.add_option("--random-seed", default=0, dest="random_seed",
+ result.add_option("--random-seed", default=0, dest="random_seed", type="int",
help="Default seed for initializing random generator")
+ result.add_option("--random-seed-stress-count", default=1, type="int",
+ dest="random_seed_stress_count",
+ help="Number of runs with different random seeds")
result.add_option("--msan",
help="Regard test expectations for MSAN",
default=False, action="store_true")
return result
+def RandomSeed():
+ seed = 0
+ while not seed:
+ seed = random.SystemRandom().randint(-2147483648, 2147483647)
+ return seed
+
+
+def BuildbotToV8Mode(config):
+ """Convert buildbot build configs to configs understood by the v8 runner.
+
+ V8 configs are always lower case and lack the additional _x64 suffix used
+ for 64-bit ninja builds on windows.
+ """
+ mode = config[:-4] if config.endswith('_x64') else config
+ return mode.lower()
+
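A quick standalone sanity check of the conversion (a sketch mirroring the helper above, not part of the patch):

    def buildbot_to_v8_mode(config):
        # Strip the _x64 suffix of 64-bit ninja builds on windows, then
        # lower-case the remainder.
        mode = config[:-4] if config.endswith('_x64') else config
        return mode.lower()

    assert buildbot_to_v8_mode("Release_x64") == "release"
    assert buildbot_to_v8_mode("Debug") == "debug"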
+def SetupEnvironment(options):
+ """Setup additional environment variables."""
+ symbolizer = 'external_symbolizer_path=%s' % (
+ os.path.join(
+ BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
+ 'llvm-symbolizer',
+ )
+ )
+
+ if options.asan:
+ os.environ['ASAN_OPTIONS'] = symbolizer
+
+ if options.cfi_vptr:
+ os.environ['UBSAN_OPTIONS'] = ":".join([
+ 'print_stacktrace=1',
+ 'print_summary=1',
+ 'symbolize=1',
+ symbolizer,
+ ])
+
+ if options.msan:
+ os.environ['MSAN_OPTIONS'] = symbolizer
+
+ if options.tsan:
+ suppressions_file = os.path.join(
+ BASE_DIR, 'tools', 'sanitizers', 'tsan_suppressions.txt')
+ os.environ['TSAN_OPTIONS'] = " ".join([
+ symbolizer,
+ 'suppressions=%s' % suppressions_file,
+ 'exit_code=0',
+ 'report_thread_leaks=0',
+ 'history_size=7',
+ 'report_destroy_locked=0',
+ ])
+
def ProcessOptions(options):
- global VARIANT_FLAGS
+ global ALL_VARIANTS
+ global EXHAUSTIVE_VARIANTS
global VARIANTS
# Architecture and mode related stuff.
@@ -272,7 +415,7 @@ def ProcessOptions(options):
options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
options.mode = options.mode.split(",")
for mode in options.mode:
- if not mode.lower() in ["debug", "release", "optdebug"]:
+ if not BuildbotToV8Mode(mode) in MODES:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
@@ -294,6 +437,8 @@ def ProcessOptions(options):
# Buildbots run presubmit tests as a separate step.
options.no_presubmit = True
options.no_network = True
+ if options.download_data_only:
+ options.no_presubmit = True
if options.command_prefix:
print("Specifying --command-prefix disables network distribution, "
"running tests locally.")
@@ -306,20 +451,27 @@ def ProcessOptions(options):
if options.asan:
options.extra_flags.append("--invoke-weak-callbacks")
+ options.extra_flags.append("--omit-quit")
+
+ if options.novfp3:
+ options.extra_flags.append("--noenable-vfp3")
+
+ if options.exhaustive_variants:
+ # This is used on many bots. It includes a larger set of default variants.
+ # Other options for manipulating variants still apply afterwards.
+ VARIANTS = EXHAUSTIVE_VARIANTS
+
+ if options.msan:
+ VARIANTS = ["default"]
if options.tsan:
VARIANTS = ["default"]
- suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
- 'sanitizers', 'tsan_suppressions.txt')
- tsan_options = '%s suppressions=%s' % (
- os.environ.get('TSAN_OPTIONS', ''), suppressions_file)
- os.environ['TSAN_OPTIONS'] = tsan_options
if options.j == 0:
options.j = multiprocessing.cpu_count()
- while options.random_seed == 0:
- options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)
+ if options.random_seed_stress_count <= 1 and options.random_seed == 0:
+ options.random_seed = RandomSeed()
def excl(*args):
"""Returns true if zero or one of multiple arguments are true."""
@@ -343,8 +495,8 @@ def ProcessOptions(options):
VARIANTS = ["stress"]
if options.variants:
VARIANTS = options.variants.split(",")
- if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
- print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
+ if not set(VARIANTS).issubset(ALL_VARIANTS):
+ print "All variants must be in %s" % str(ALL_VARIANTS)
return False
if options.predictable:
VARIANTS = ["default"]
@@ -372,12 +524,33 @@ def ProcessOptions(options):
return False
if not CheckTestMode("pass|fail test", options.pass_fail_tests):
return False
- if not options.no_i18n:
- DEFAULT_TESTS.append("intl")
+ if options.no_i18n:
+ TEST_MAP["bot_default"].remove("intl")
+ TEST_MAP["default"].remove("intl")
return True
-def ShardTests(tests, shard_count, shard_run):
+def ShardTests(tests, options):
+ # Read gtest shard configuration from environment (e.g. set by swarming).
+ # If none is present, use values passed on the command line.
+ shard_count = int(os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+ shard_run = os.environ.get('GTEST_SHARD_INDEX')
+ if shard_run is not None:
+ # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
+ shard_run = int(shard_run) + 1
+ else:
+ shard_run = options.shard_run
+
+ if options.shard_count > 1:
+ # Log if a value was passed on the cmd line and it differs from the
+ # environment variables.
+ if options.shard_count != shard_count:
+ print("shard_count from cmd line differs from environment variable "
+ "GTEST_TOTAL_SHARDS")
+ if options.shard_run > 1 and options.shard_run != shard_run:
+ print("shard_run from cmd line differs from environment variable "
+ "GTEST_SHARD_INDEX")
+
if shard_count < 2:
return tests
if shard_run < 1 or shard_run > shard_count:
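A minimal standalone sketch of the environment override above, including the 0-based to 1-based shard index conversion (resolve_shard is a hypothetical name):

    import os

    def resolve_shard(cmdline_count, cmdline_run):
        # GTEST_* variables (e.g. set by swarming) win over the command line.
        count = int(os.environ.get('GTEST_TOTAL_SHARDS', cmdline_count))
        run = os.environ.get('GTEST_SHARD_INDEX')
        if run is not None:
            # GTEST_SHARD_INDEX is 0-based; the v8 shard_run is 1-based.
            return count, int(run) + 1
        return count, cmdline_run

    os.environ['GTEST_TOTAL_SHARDS'] = '4'
    os.environ['GTEST_SHARD_INDEX'] = '0'
    assert resolve_shard(1, 1) == (4, 1)  # first of four shards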
@@ -394,78 +567,89 @@ def ShardTests(tests, shard_count, shard_run):
def Main():
+ # Use the v8 root as cwd, since some test cases use "load" with relative paths.
+ os.chdir(BASE_DIR)
+
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
+ SetupEnvironment(options)
exit_code = 0
- workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
if not options.no_presubmit:
print ">>> running presubmit tests"
exit_code = subprocess.call(
- [sys.executable, join(workspace, "tools", "presubmit.py")])
+ [sys.executable, join(BASE_DIR, "tools", "presubmit.py")])
- suite_paths = utils.GetSuitePaths(join(workspace, "test"))
+ suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))
+
+ # Use default tests if no test configuration was provided at the cmd line.
+ if len(args) == 0:
+ args = ["default"]
# Expand arguments with grouped tests. The args should reflect the list of
# suites as otherwise filters would break.
def ExpandTestGroups(name):
if name in TEST_MAP:
- return [suite for suite in TEST_MAP[arg]]
+ return [suite for suite in TEST_MAP[name]]
else:
return [name]
args = reduce(lambda x, y: x + y,
[ExpandTestGroups(arg) for arg in args],
[])
- if len(args) == 0:
- suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
- else:
- args_suites = OrderedDict() # Used as set
- for arg in args:
- args_suites[arg.split(os.path.sep)[0]] = True
- suite_paths = [ s for s in args_suites if s in suite_paths ]
+ args_suites = OrderedDict() # Used as set
+ for arg in args:
+ args_suites[arg.split('/')[0]] = True
+ suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(workspace, "test", root))
+ os.path.join(BASE_DIR, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
- if options.download_data:
+ if options.download_data or options.download_data_only:
for s in suites:
s.DownloadData()
+ if options.download_data_only:
+ return exit_code
+
for (arch, mode) in options.arch_and_mode:
try:
- code = Execute(arch, mode, args, options, suites, workspace)
+ code = Execute(arch, mode, args, options, suites)
except KeyboardInterrupt:
return 2
exit_code = exit_code or code
return exit_code
-def Execute(arch, mode, args, options, suites, workspace):
+def Execute(arch, mode, args, options, suites):
print(">>> Running tests for %s.%s" % (arch, mode))
shell_dir = options.shell_dir
if not shell_dir:
if options.buildbot:
- shell_dir = os.path.join(workspace, options.outdir, mode)
- mode = mode.lower()
+ # TODO(machenbach): Get rid of different output folder location on
+ # buildbot. Currently this is capitalized Release and Debug.
+ shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
+ mode = BuildbotToV8Mode(mode)
else:
- shell_dir = os.path.join(workspace, options.outdir,
- "%s.%s" % (arch, mode))
- shell_dir = os.path.relpath(shell_dir)
-
- if mode == "optdebug":
- mode = "debug" # "optdebug" is just an alias.
+ shell_dir = os.path.join(
+ BASE_DIR,
+ options.outdir,
+ "%s.%s" % (arch, MODES[mode]["output_folder"]),
+ )
+ if not os.path.exists(shell_dir):
+ raise Exception('Could not find shell_dir: "%s"' % shell_dir)
# Populate context object.
- mode_flags = MODE_FLAGS[mode]
+ mode_flags = MODES[mode]["flags"]
timeout = options.timeout
if timeout == -1:
# Simulators are slow, therefore allow a longer default timeout.
@@ -474,14 +658,20 @@ def Execute(arch, mode, args, options, suites, workspace):
else:
timeout = TIMEOUT_DEFAULT;
- timeout *= TIMEOUT_SCALEFACTOR[mode]
+ timeout *= MODES[mode]["timeout_scalefactor"]
if options.predictable:
# Predictable mode is slower.
timeout *= 2
- ctx = context.Context(arch, mode, shell_dir,
- mode_flags, options.verbose,
+ # TODO(machenbach): Remove temporary verbose output on windows after
+ # debugging driver-hung-up on XP.
+ verbose_output = (
+ options.verbose or
+ utils.IsWindows() and options.progress == "verbose"
+ )
+ ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
+ mode_flags, verbose_output,
timeout, options.isolates,
options.command_prefix,
options.extra_flags,
@@ -490,11 +680,14 @@ def Execute(arch, mode, args, options, suites, workspace):
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
- options.predictable)
+ options.predictable,
+ options.no_harness,
+ use_perf_data=not options.swarming)
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
- arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el'] and \
+ arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
+ 'ppc', 'ppc64'] and \
ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
@@ -502,8 +695,10 @@ def Execute(arch, mode, args, options, suites, workspace):
"asan": options.asan,
"deopt_fuzzer": False,
"gc_stress": options.gc_stress,
+ "gcov_coverage": options.gcov_coverage,
+ "ignition": options.ignition,
"isolates": options.isolates,
- "mode": mode,
+ "mode": MODES[mode]["status_mode"],
"no_i18n": options.no_i18n,
"no_snap": options.no_snap,
"simulator_run": simulator_run,
@@ -512,10 +707,12 @@ def Execute(arch, mode, args, options, suites, workspace):
"tsan": options.tsan,
"msan": options.msan,
"dcheck_always_on": options.dcheck_always_on,
+ "novfp3": options.novfp3,
+ "predictable": options.predictable,
+ "byteorder": sys.byteorder,
}
all_tests = []
num_tests = 0
- test_id = 0
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
@@ -527,15 +724,32 @@ def Execute(arch, mode, args, options, suites, workspace):
if options.cat:
verbose.PrintTestSource(s.tests)
continue
- variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
- s.tests = [ t.CopyAddingFlags(v)
- for t in s.tests
- for v in s.VariantFlags(t, variant_flags) ]
- s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
+ variant_gen = s.CreateVariantGenerator(VARIANTS)
+ variant_tests = [ t.CopyAddingFlags(v, flags)
+ for t in s.tests
+ for v in variant_gen.FilterVariantsByTest(t)
+ for flags in variant_gen.GetFlagSets(t, v) ]
+
+ if options.random_seed_stress_count > 1:
+ # Duplicate test for random seed stress mode.
+ def iter_seed_flags():
+ for i in range(0, options.random_seed_stress_count):
+ # Use given random seed for all runs (set by default in execution.py)
+ # or a new random seed if none is specified.
+ if options.random_seed:
+ yield []
+ else:
+ yield ["--random-seed=%d" % RandomSeed()]
+ s.tests = [
+ t.CopyAddingFlags(t.variant, flags)
+ for t in variant_tests
+ for flags in iter_seed_flags()
+ ]
+ else:
+ s.tests = variant_tests
+
+ s.tests = ShardTests(s.tests, options)
num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- test_id += 1
if options.cat:
return 0 # We're done here.
@@ -543,23 +757,22 @@ def Execute(arch, mode, args, options, suites, workspace):
if options.report:
verbose.PrintReport(all_tests)
- if num_tests == 0:
- print "No tests to run."
- return 0
-
# Run the tests, either locally or distributed on the network.
start_time = time.time()
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ progress_indicator = progress.IndicatorNotifier()
+ progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]())
if options.junitout:
- progress_indicator = progress.JUnitTestProgressIndicator(
- progress_indicator, options.junitout, options.junittestsuite)
+ progress_indicator.Register(progress.JUnitTestProgressIndicator(
+ options.junitout, options.junittestsuite))
if options.json_test_results:
- progress_indicator = progress.JsonTestProgressIndicator(
- progress_indicator, options.json_test_results, arch, mode)
+ progress_indicator.Register(progress.JsonTestProgressIndicator(
+ options.json_test_results, arch, MODES[mode]["execution_mode"],
+ ctx.random_seed))
run_networked = not options.no_network
if not run_networked:
- print("Network distribution disabled, running tests locally.")
+ if verbose_output:
+ print("Network distribution disabled, running tests locally.")
elif utils.GuessOS() != "linux":
print("Network distribution is only supported on Linux, sorry!")
run_networked = False
@@ -578,7 +791,7 @@ def Execute(arch, mode, args, options, suites, workspace):
if run_networked:
runner = network_execution.NetworkedRunner(suites, progress_indicator,
- ctx, peers, workspace)
+ ctx, peers, BASE_DIR)
else:
runner = execution.Runner(suites, progress_indicator, ctx)
@@ -587,6 +800,15 @@ def Execute(arch, mode, args, options, suites, workspace):
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
+
+ if num_tests == 0:
+ print("Warning: no tests were run!")
+
+ if exit_code == 1 and options.json_test_results:
+ print("Force exit code 0 after failures. Json test results file generated "
+ "with failure information.")
+ exit_code = 0
+
return exit_code
diff --git a/tools/run-valgrind.py b/tools/run-valgrind.py
index f25f7a11..e3f84f58 100755
--- a/tools/run-valgrind.py
+++ b/tools/run-valgrind.py
@@ -29,23 +29,47 @@
# Simple wrapper for running valgrind and checking the output on
# stderr for memory leaks.
+# Uses valgrind from third_party/valgrind. Assumes the executable is passed
+# with a path relative to the v8 root.
+
+from os import path
+import platform
+import re
import subprocess
import sys
-import re
+
+V8_ROOT = path.dirname(path.dirname(path.abspath(__file__)))
+MACHINE = 'linux_x64' if platform.machine() == 'x86_64' else 'linux_x86'
+VALGRIND_ROOT = path.join(V8_ROOT, 'third_party', 'valgrind', MACHINE)
+VALGRIND_BIN = path.join(VALGRIND_ROOT, 'bin', 'valgrind')
+VALGRIND_LIB = path.join(VALGRIND_ROOT, 'lib', 'valgrind')
VALGRIND_ARGUMENTS = [
- 'valgrind',
+ VALGRIND_BIN,
'--error-exitcode=1',
'--leak-check=full',
- '--smc-check=all'
+ '--smc-check=all',
]
+if len(sys.argv) < 2:
+ print 'Please provide an executable to analyze.'
+ sys.exit(1)
+
+executable = path.join(V8_ROOT, sys.argv[1])
+if not path.exists(executable):
+ print 'Cannot find the file specified: %s' % executable
+ sys.exit(1)
+
# Compute the command line.
-command = VALGRIND_ARGUMENTS + sys.argv[1:]
+command = VALGRIND_ARGUMENTS + [executable] + sys.argv[2:]
# Run valgrind.
-process = subprocess.Popen(command, stderr=subprocess.PIPE)
+process = subprocess.Popen(
+ command,
+ stderr=subprocess.PIPE,
+ env={'VALGRIND_LIB': VALGRIND_LIB}
+)
code = process.wait();
errors = process.stderr.readlines();
@@ -74,4 +98,5 @@ if len(leaks) < 2 or len(leaks) > 3:
sys.exit(1)
# No leaks found.
+sys.stderr.writelines(errors)
sys.exit(0)
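A hypothetical invocation of the wrapper (binary path and forwarded arguments are illustrative; the path must be relative to the v8 root, as required above):

    import subprocess
    import sys

    # Runs d8 under the bundled valgrind; extra arguments are forwarded.
    rc = subprocess.call(
        [sys.executable, "tools/run-valgrind.py", "out/x64.release/d8",
         "-e", "print(1)"])
    print "run-valgrind exited with %d" % rc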
diff --git a/tools/run_perf.py b/tools/run_perf.py
index 63c91485..a8cc3fab 100755
--- a/tools/run_perf.py
+++ b/tools/run_perf.py
@@ -102,16 +102,14 @@ import math
import optparse
import os
import re
+import subprocess
import sys
from testrunner.local import commands
from testrunner.local import utils
ARCH_GUESS = utils.DefaultArch()
-SUPPORTED_ARCHS = ["android_arm",
- "android_arm64",
- "android_ia32",
- "arm",
+SUPPORTED_ARCHS = ["arm",
"ia32",
"mips",
"mipsel",
@@ -123,22 +121,23 @@ SUPPORTED_ARCHS = ["android_arm",
GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
+TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
def LoadAndroidBuildTools(path): # pragma: no cover
assert os.path.exists(path)
sys.path.insert(0, path)
- from pylib.device import device_utils # pylint: disable=F0401
+ from pylib.device import adb_wrapper # pylint: disable=F0401
from pylib.device import device_errors # pylint: disable=F0401
+ from pylib.device import device_utils # pylint: disable=F0401
from pylib.perf import cache_control # pylint: disable=F0401
from pylib.perf import perf_control # pylint: disable=F0401
- import pylib.android_commands # pylint: disable=F0401
+ global adb_wrapper
global cache_control
global device_errors
global device_utils
global perf_control
- global pylib
def GeometricMean(values):
@@ -172,6 +171,174 @@ class Results(object):
return str(self.ToDict())
+class Measurement(object):
+ """Represents a series of results of one trace.
+
+ The results are from repeated runs of the same executable. They are
+ gathered by repeated calls to ConsumeOutput.
+ """
+ def __init__(self, graphs, units, results_regexp, stddev_regexp):
+ self.name = graphs[-1]
+ self.graphs = graphs
+ self.units = units
+ self.results_regexp = results_regexp
+ self.stddev_regexp = stddev_regexp
+ self.results = []
+ self.errors = []
+ self.stddev = ""
+
+ def ConsumeOutput(self, stdout):
+ try:
+ result = re.search(self.results_regexp, stdout, re.M).group(1)
+ self.results.append(str(float(result)))
+ except ValueError:
+ self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
+ % (self.results_regexp, self.name))
+ except:
+ self.errors.append("Regexp \"%s\" didn't match for test %s."
+ % (self.results_regexp, self.name))
+
+ try:
+ if self.stddev_regexp and self.stddev:
+ self.errors.append("Test %s should only run once since a stddev "
+ "is provided by the test." % self.name)
+ if self.stddev_regexp:
+ self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
+ except:
+ self.errors.append("Regexp \"%s\" didn't match for test %s."
+ % (self.stddev_regexp, self.name))
+
+ def GetResults(self):
+ return Results([{
+ "graphs": self.graphs,
+ "units": self.units,
+ "results": self.results,
+ "stddev": self.stddev,
+ }], self.errors)
+
+
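A hedged sketch of the extraction ConsumeOutput performs; the results_regexp here is hypothetical (in practice it comes from the suite definition):

    import re

    results_regexp = r"^Octane: (.+)$"  # hypothetical suite regexp
    stdout = "booting...\nOctane: 1234\n"
    # Mirrors ConsumeOutput: take the first capture group of the first
    # multi-line match and validate it via float().
    result = re.search(results_regexp, stdout, re.M).group(1)
    assert str(float(result)) == "1234.0"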
+class NullMeasurement(object):
+ """Null object to avoid having extra logic for configurations that didn't
+ run like running without patch on trybots.
+ """
+ def ConsumeOutput(self, stdout):
+ pass
+
+ def GetResults(self):
+ return Results()
+
+
+def Unzip(iterable):
+ left = []
+ right = []
+ for l, r in iterable:
+ left.append(l)
+ right.append(r)
+ return lambda: iter(left), lambda: iter(right)
+
+
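Unzip splits an iterable of pairs into two independently re-iterable streams; each returned lambda yields a fresh iterator. A minimal usage sketch, assuming the helper above is in scope:

    pairs = [("run1 stdout", None), ("run2 stdout", None)]
    with_patch, no_patch = Unzip(iter(pairs))
    assert list(with_patch()) == ["run1 stdout", "run2 stdout"]
    assert list(no_patch()) == [None, None]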
+def AccumulateResults(
+ graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
+ """Iterates over the output of multiple benchmark reruns and accumulates
+ results for a configured list of traces.
+
+ Args:
+ graph_names: List of names that configure the base path of the traces. E.g.
+ ['v8', 'Octane'].
+ trace_configs: List of "TraceConfig" instances. Each trace config defines
+ how to perform a measurement.
+ iter_output: Iterator over the standard output of each test run.
+ trybot: Indicates that this is run in trybot mode, i.e. run twice, once
+ with and once without the patch.
+ no_patch: Indicates whether this is a trybot run without the patch.
+ calc_total: Boolean flag to specify the calculation of a summary trace.
+ Returns: A "Results" object.
+ """
+ measurements = [
+ trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
+ for stdout in iter_output():
+ for measurement in measurements:
+ measurement.ConsumeOutput(stdout)
+
+ res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())
+
+ if not res.traces or not calc_total:
+ return res
+
+ # Assume all traces have the same structure.
+ if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
+ res.errors.append("Not all traces have the same number of results.")
+ return res
+
+ # Calculate the geometric means for all traces. Above we made sure that
+ # there is at least one trace and that the number of results is the same
+ # for each trace.
+ n_results = len(res.traces[0]["results"])
+ total_results = [GeometricMean(t["results"][i] for t in res.traces)
+ for i in range(0, n_results)]
+ res.traces.append({
+ "graphs": graph_names + ["Total"],
+ "units": res.traces[0]["units"],
+ "results": total_results,
+ "stddev": "",
+ })
+ return res
+
+
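A worked example of the "Total" trace computed above: the i-th total is the geometric mean of every trace's i-th result. geometric_mean below is a standalone stand-in for the GeometricMean helper defined earlier in this file:

    import math

    def geometric_mean(values):
        values = [float(v) for v in values]
        return math.exp(sum(math.log(v) for v in values) / len(values))

    traces = [{"results": ["2.0", "8.0"]}, {"results": ["8.0", "2.0"]}]
    n_results = len(traces[0]["results"])
    totals = [geometric_mean(t["results"][i] for t in traces)
              for i in range(n_results)]
    assert all(abs(t - 4.0) < 1e-9 for t in totals)  # geomean(2, 8) == 4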
+def AccumulateGenericResults(graph_names, suite_units, iter_output):
+ """Iterates over the output of multiple benchmark reruns and accumulates
+ generic results.
+
+ Args:
+ graph_names: List of names that configure the base path of the traces. E.g.
+ ['v8', 'Octane'].
+ suite_units: Measurement default units as defined by the benchmark suite.
+ iter_output: Iterator over the standard output of each test run.
+ Returns: A "Results" object.
+ """
+ traces = OrderedDict()
+ for stdout in iter_output():
+ if stdout is None:
+ # The None value is used as a null object to simplify logic.
+ continue
+ for line in stdout.strip().splitlines():
+ match = GENERIC_RESULTS_RE.match(line)
+ if match:
+ stddev = ""
+ graph = match.group(1)
+ trace = match.group(2)
+ body = match.group(3)
+ units = match.group(4)
+ match_stddev = RESULT_STDDEV_RE.match(body)
+ match_list = RESULT_LIST_RE.match(body)
+ errors = []
+ if match_stddev:
+ result, stddev = map(str.strip, match_stddev.group(1).split(","))
+ results = [result]
+ elif match_list:
+ results = map(str.strip, match_list.group(1).split(","))
+ else:
+ results = [body.strip()]
+
+ try:
+ results = map(lambda r: str(float(r)), results)
+ except ValueError:
+ results = []
+ errors = ["Found non-numeric in %s" %
+ "/".join(graph_names + [graph, trace])]
+
+ trace_result = traces.setdefault(trace, Results([{
+ "graphs": graph_names + [graph, trace],
+ "units": (units or suite_units).strip(),
+ "results": [],
+ "stddev": "",
+ }], errors))
+ trace_result.traces[0]["results"].extend(results)
+ trace_result.traces[0]["stddev"] = stddev
+
+ return reduce(lambda r, t: r + t, traces.itervalues(), Results())
+
+
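For reference, a standalone sketch of parsing one generic "RESULT" line in its {mean,stddev} shape, reusing the regexps defined near the top of this file:

    import re

    GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
    RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")

    line = "RESULT v8: Octane= {100.0,5.0} score"
    graph, trace, body, units = GENERIC_RESULTS_RE.match(line).groups()
    result, stddev = map(str.strip,
                         RESULT_STDDEV_RE.match(body).group(1).split(","))
    assert (graph, trace, units) == ("v8", "Octane", "score")
    assert (result, stddev) == ("100.0", "5.0")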
class Node(object):
"""Represents a node in the suite tree structure."""
def __init__(self, *args):
@@ -199,13 +366,13 @@ class DefaultSentinel(Node):
self.total = False
-class Graph(Node):
+class GraphConfig(Node):
"""Represents a suite definition.
Can either be a leaf or an inner node that provides default values.
"""
def __init__(self, suite, parent, arch):
- super(Graph, self).__init__()
+ super(GraphConfig, self).__init__()
self._suite = suite
assert isinstance(suite.get("path", []), list)
@@ -251,49 +418,26 @@ class Graph(Node):
self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
-class Trace(Graph):
- """Represents a leaf in the suite tree structure.
-
- Handles collection of measurements.
- """
+class TraceConfig(GraphConfig):
+ """Represents a leaf in the suite tree structure."""
def __init__(self, suite, parent, arch):
- super(Trace, self).__init__(suite, parent, arch)
+ super(TraceConfig, self).__init__(suite, parent, arch)
assert self.results_regexp
- self.results = []
- self.errors = []
- self.stddev = ""
- def ConsumeOutput(self, stdout):
- try:
- result = re.search(self.results_regexp, stdout, re.M).group(1)
- self.results.append(str(float(result)))
- except ValueError:
- self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
- % (self.results_regexp, self.graphs[-1]))
- except:
- self.errors.append("Regexp \"%s\" didn't match for test %s."
- % (self.results_regexp, self.graphs[-1]))
-
- try:
- if self.stddev_regexp and self.stddev:
- self.errors.append("Test %s should only run once since a stddev "
- "is provided by the test." % self.graphs[-1])
- if self.stddev_regexp:
- self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
- except:
- self.errors.append("Regexp \"%s\" didn't match for test %s."
- % (self.stddev_regexp, self.graphs[-1]))
+ def CreateMeasurement(self, trybot, no_patch):
+ if not trybot and no_patch:
+ # Use null object for no-patch logic if this is not a trybot run.
+ return NullMeasurement()
- def GetResults(self):
- return Results([{
- "graphs": self.graphs,
- "units": self.units,
- "results": self.results,
- "stddev": self.stddev,
- }], self.errors)
+ return Measurement(
+ self.graphs,
+ self.units,
+ self.results_regexp,
+ self.stddev_regexp,
+ )
-class Runnable(Graph):
+class RunnableConfig(GraphConfig):
"""Represents a runnable suite definition (i.e. has a main file).
"""
@property
@@ -309,127 +453,97 @@ class Runnable(Graph):
bench_dir = os.path.normpath(os.path.join(*self.path))
os.chdir(os.path.join(suite_dir, bench_dir))
- def GetCommandFlags(self):
+ def GetCommandFlags(self, extra_flags=None):
suffix = ["--"] + self.test_flags if self.test_flags else []
- return self.flags + [self.main] + suffix
+ return self.flags + (extra_flags or []) + [self.main] + suffix
- def GetCommand(self, shell_dir):
+ def GetCommand(self, shell_dir, extra_flags=None):
# TODO(machenbach): This requires +.exe if run on windows.
- return [os.path.join(shell_dir, self.binary)] + self.GetCommandFlags()
+ extra_flags = extra_flags or []
+ cmd = [os.path.join(shell_dir, self.binary)]
+ if self.binary != 'd8' and '--prof' in extra_flags:
+ print "Profiler supported only on a benchmark run with d8"
+ return cmd + self.GetCommandFlags(extra_flags=extra_flags)
- def Run(self, runner):
+ def Run(self, runner, trybot):
"""Iterates over several runs and handles the output for all traces."""
- for stdout in runner():
- for trace in self._children:
- trace.ConsumeOutput(stdout)
- res = reduce(lambda r, t: r + t.GetResults(), self._children, Results())
-
- if not res.traces or not self.total:
- return res
-
- # Assume all traces have the same structure.
- if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
- res.errors.append("Not all traces have the same number of results.")
- return res
-
- # Calculate the geometric means for all traces. Above we made sure that
- # there is at least one trace and that the number of results is the same
- # for each trace.
- n_results = len(res.traces[0]["results"])
- total_results = [GeometricMean(t["results"][i] for t in res.traces)
- for i in range(0, n_results)]
- res.traces.append({
- "graphs": self.graphs + ["Total"],
- "units": res.traces[0]["units"],
- "results": total_results,
- "stddev": "",
- })
- return res
-
-class RunnableTrace(Trace, Runnable):
+ stdout_with_patch, stdout_no_patch = Unzip(runner())
+ return (
+ AccumulateResults(
+ self.graphs,
+ self._children,
+ iter_output=stdout_with_patch,
+ trybot=trybot,
+ no_patch=False,
+ calc_total=self.total,
+ ),
+ AccumulateResults(
+ self.graphs,
+ self._children,
+ iter_output=stdout_no_patch,
+ trybot=trybot,
+ no_patch=True,
+ calc_total=self.total,
+ ),
+ )
+
+
+class RunnableTraceConfig(TraceConfig, RunnableConfig):
"""Represents a runnable suite definition that is a leaf."""
def __init__(self, suite, parent, arch):
- super(RunnableTrace, self).__init__(suite, parent, arch)
+ super(RunnableTraceConfig, self).__init__(suite, parent, arch)
- def Run(self, runner):
+ def Run(self, runner, trybot):
"""Iterates over several runs and handles the output."""
- for stdout in runner():
- self.ConsumeOutput(stdout)
- return self.GetResults()
-
-
-class RunnableGeneric(Runnable):
+ measurement_with_patch = self.CreateMeasurement(trybot, False)
+ measurement_no_patch = self.CreateMeasurement(trybot, True)
+ for stdout_with_patch, stdout_no_patch in runner():
+ measurement_with_patch.ConsumeOutput(stdout_with_patch)
+ measurement_no_patch.ConsumeOutput(stdout_no_patch)
+ return (
+ measurement_with_patch.GetResults(),
+ measurement_no_patch.GetResults(),
+ )
+
+
+class RunnableGenericConfig(RunnableConfig):
"""Represents a runnable suite definition with generic traces."""
def __init__(self, suite, parent, arch):
- super(RunnableGeneric, self).__init__(suite, parent, arch)
+ super(RunnableGenericConfig, self).__init__(suite, parent, arch)
- def Run(self, runner):
- """Iterates over several runs and handles the output."""
- traces = OrderedDict()
- for stdout in runner():
- for line in stdout.strip().splitlines():
- match = GENERIC_RESULTS_RE.match(line)
- if match:
- stddev = ""
- graph = match.group(1)
- trace = match.group(2)
- body = match.group(3)
- units = match.group(4)
- match_stddev = RESULT_STDDEV_RE.match(body)
- match_list = RESULT_LIST_RE.match(body)
- errors = []
- if match_stddev:
- result, stddev = map(str.strip, match_stddev.group(1).split(","))
- results = [result]
- elif match_list:
- results = map(str.strip, match_list.group(1).split(","))
- else:
- results = [body.strip()]
-
- try:
- results = map(lambda r: str(float(r)), results)
- except ValueError:
- results = []
- errors = ["Found non-numeric in %s" %
- "/".join(self.graphs + [graph, trace])]
-
- trace_result = traces.setdefault(trace, Results([{
- "graphs": self.graphs + [graph, trace],
- "units": (units or self.units).strip(),
- "results": [],
- "stddev": "",
- }], errors))
- trace_result.traces[0]["results"].extend(results)
- trace_result.traces[0]["stddev"] = stddev
-
- return reduce(lambda r, t: r + t, traces.itervalues(), Results())
-
-
-def MakeGraph(suite, arch, parent):
- """Factory method for making graph objects."""
- if isinstance(parent, Runnable):
+ def Run(self, runner, trybot):
+ stdout_with_patch, stdout_no_patch = Unzip(runner())
+ return (
+ AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
+ AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
+ )
+
+
+def MakeGraphConfig(suite, arch, parent):
+ """Factory method for making graph configuration objects."""
+ if isinstance(parent, RunnableConfig):
# Below a runnable can only be traces.
- return Trace(suite, parent, arch)
- elif suite.get("main"):
- # A main file makes this graph runnable.
+ return TraceConfig(suite, parent, arch)
+ elif suite.get("main") is not None:
+ # A main file makes this graph runnable. Empty strings are accepted.
if suite.get("tests"):
# This graph has subgraphs (traces).
- return Runnable(suite, parent, arch)
+ return RunnableConfig(suite, parent, arch)
else:
# This graph has no subgraphs, it's a leaf.
- return RunnableTrace(suite, parent, arch)
+ return RunnableTraceConfig(suite, parent, arch)
elif suite.get("generic"):
# This is a generic suite definition. It is either a runnable executable
# or has a main js file.
- return RunnableGeneric(suite, parent, arch)
+ return RunnableGenericConfig(suite, parent, arch)
elif suite.get("tests"):
# This is neither a leaf nor a runnable.
- return Graph(suite, parent, arch)
+ return GraphConfig(suite, parent, arch)
else: # pragma: no cover
raise Exception("Invalid suite configuration.")
-def BuildGraphs(suite, arch, parent=None):
+def BuildGraphConfigs(suite, arch, parent=None):
"""Builds a tree structure of graph objects that corresponds to the suite
configuration.
"""
@@ -439,9 +553,9 @@ def BuildGraphs(suite, arch, parent=None):
if arch not in suite.get("archs", SUPPORTED_ARCHS):
return None
- graph = MakeGraph(suite, arch, parent)
+ graph = MakeGraphConfig(suite, arch, parent)
for subsuite in suite.get("tests", []):
- BuildGraphs(subsuite, arch, graph)
+ BuildGraphConfigs(subsuite, arch, graph)
parent.AppendChild(graph)
return graph
@@ -451,7 +565,7 @@ def FlattenRunnables(node, node_cb):
runnables.
"""
node_cb(node)
- if isinstance(node, Runnable):
+ if isinstance(node, RunnableConfig):
yield node
elif isinstance(node, Node):
for child in node._children:
@@ -462,17 +576,43 @@ def FlattenRunnables(node, node_cb):
class Platform(object):
+ def __init__(self, options):
+ self.shell_dir = options.shell_dir
+ self.shell_dir_no_patch = options.shell_dir_no_patch
+ self.extra_flags = options.extra_flags.split()
+
@staticmethod
def GetPlatform(options):
- if options.arch.startswith("android"):
+ if options.android_build_tools:
return AndroidPlatform(options)
else:
return DesktopPlatform(options)
+ def _Run(self, runnable, count, no_patch=False):
+ raise NotImplementedError() # pragma: no cover
+
+ def Run(self, runnable, count):
+ """Execute the benchmark's main file.
+
+ If options.shell_dir_no_patch is specified, the benchmark is run once with
+ and once without patch.
+ Args:
+ runnable: A Runnable benchmark instance.
+ count: The number of this (repeated) run.
+ Returns: A tuple with the benchmark outputs with and without patch. The
+ latter will be None if options.shell_dir_no_patch was not
+ specified.
+ """
+ stdout = self._Run(runnable, count, no_patch=False)
+ if self.shell_dir_no_patch:
+ return stdout, self._Run(runnable, count, no_patch=True)
+ else:
+ return stdout, None
+
class DesktopPlatform(Platform):
def __init__(self, options):
- self.shell_dir = options.shell_dir
+ super(DesktopPlatform, self).__init__(options)
def PreExecution(self):
pass
@@ -481,20 +621,37 @@ class DesktopPlatform(Platform):
pass
def PreTests(self, node, path):
- if isinstance(node, Runnable):
+ if isinstance(node, RunnableConfig):
node.ChangeCWD(path)
- def Run(self, runnable, count):
- output = commands.Execute(runnable.GetCommand(self.shell_dir),
- timeout=runnable.timeout)
- print ">>> Stdout (#%d):" % (count + 1)
+ def _Run(self, runnable, count, no_patch=False):
+ suffix = ' - without patch' if no_patch else ''
+ shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
+ title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
+ try:
+ output = commands.Execute(
+ runnable.GetCommand(shell_dir, self.extra_flags),
+ timeout=runnable.timeout,
+ )
+ except OSError as e: # pragma: no cover
+ print title % "OSError"
+ print e
+ return ""
+ print title % "Stdout"
print output.stdout
if output.stderr: # pragma: no cover
# Print stderr for debugging.
- print ">>> Stderr (#%d):" % (count + 1)
+ print title % "Stderr"
print output.stderr
if output.timed_out:
print ">>> Test timed out after %ss." % runnable.timeout
+ if '--prof' in self.extra_flags:
+ os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
+ if os_prefix:
+ tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
+ subprocess.check_call(tick_tools + " --only-summary", shell=True)
+ else: # pragma: no cover
+ print "Profiler option currently supported on Linux and Mac OS."
return output.stdout
@@ -502,20 +659,18 @@ class AndroidPlatform(Platform): # pragma: no cover
DEVICE_DIR = "/data/local/tmp/v8/"
def __init__(self, options):
- self.shell_dir = options.shell_dir
+ super(AndroidPlatform, self).__init__(options)
LoadAndroidBuildTools(options.android_build_tools)
if not options.device:
# Detect attached device if not specified.
- devices = pylib.android_commands.GetAttachedDevices(
- hardware=True, emulator=False, offline=False)
+ devices = adb_wrapper.AdbWrapper.Devices()
assert devices and len(devices) == 1, (
"None or multiple devices detected. Please specify the device on "
"the command-line with --device")
- options.device = devices[0]
- adb_wrapper = pylib.android_commands.AndroidCommands(options.device)
- self.device = device_utils.DeviceUtils(adb_wrapper)
- self.adb = adb_wrapper.Adb()
+ options.device = str(devices[0])
+ self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
+ self.device = device_utils.DeviceUtils(self.adb_wrapper)
def PreExecution(self):
perf = perf_control.PerfControl(self.device)
@@ -529,11 +684,8 @@ class AndroidPlatform(Platform): # pragma: no cover
perf.SetDefaultPerfMode()
self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])
- def _SendCommand(self, cmd):
- logging.info("adb -s %s %s" % (str(self.device), cmd))
- return self.adb.SendCommand(cmd, timeout_time=60)
-
- def _PushFile(self, host_dir, file_name, target_rel="."):
+ def _PushFile(self, host_dir, file_name, target_rel=".",
+ skip_if_missing=False):
file_on_host = os.path.join(host_dir, file_name)
file_on_device_tmp = os.path.join(
AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
@@ -541,6 +693,12 @@ class AndroidPlatform(Platform): # pragma: no cover
AndroidPlatform.DEVICE_DIR, target_rel, file_name)
folder_on_device = os.path.dirname(file_on_device)
+ # Only attempt to push files that exist.
+ if not os.path.exists(file_on_host):
+ if not skip_if_missing:
+ logging.critical('Missing file on host: %s' % file_on_host)
+ return
+
# Only push files not yet pushed in one execution.
if file_on_host in self.pushed:
return
@@ -549,14 +707,31 @@ class AndroidPlatform(Platform): # pragma: no cover
# Work-around for "text file busy" errors. Push the files to a temporary
# location and then copy them with a shell command.
- output = self._SendCommand(
- "push %s %s" % (file_on_host, file_on_device_tmp))
+ output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
# Errors look like this: "failed to copy ... ".
if output and not re.search('^[0-9]', output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + output)
- self._SendCommand("shell mkdir -p %s" % folder_on_device)
- self._SendCommand("shell cp %s %s" % (file_on_device_tmp, file_on_device))
+ self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
+ self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))
+
+ def _PushExecutable(self, shell_dir, target_dir, binary):
+ self._PushFile(shell_dir, binary, target_dir)
+
+ # Push external startup data. Backwards compatible for revisions where
+ # these files didn't exist.
+ self._PushFile(
+ shell_dir,
+ "natives_blob.bin",
+ target_dir,
+ skip_if_missing=True,
+ )
+ self._PushFile(
+ shell_dir,
+ "snapshot_blob.bin",
+ target_dir,
+ skip_if_missing=True,
+ )
def PreTests(self, node, path):
suite_dir = os.path.abspath(os.path.dirname(path))
@@ -567,17 +742,25 @@ class AndroidPlatform(Platform): # pragma: no cover
bench_rel = "."
bench_abs = suite_dir
- self._PushFile(self.shell_dir, node.binary)
- if isinstance(node, Runnable):
+ self._PushExecutable(self.shell_dir, "bin", node.binary)
+ if self.shell_dir_no_patch:
+ self._PushExecutable(
+ self.shell_dir_no_patch, "bin_no_patch", node.binary)
+
+ if isinstance(node, RunnableConfig):
self._PushFile(bench_abs, node.main, bench_rel)
for resource in node.resources:
self._PushFile(bench_abs, resource, bench_rel)
- def Run(self, runnable, count):
+ def _Run(self, runnable, count, no_patch=False):
+ suffix = ' - without patch' if no_patch else ''
+ target_dir = "bin_no_patch" if no_patch else "bin"
+ title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
cache = cache_control.CacheControl(self.device)
cache.DropRamCaches()
- binary_on_device = AndroidPlatform.DEVICE_DIR + runnable.binary
- cmd = [binary_on_device] + runnable.GetCommandFlags()
+ binary_on_device = os.path.join(
+ AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
+ cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)
# Relative path to benchmark directory.
if runnable.path:
@@ -593,7 +776,7 @@ class AndroidPlatform(Platform): # pragma: no cover
retries=0,
)
stdout = "\n".join(output)
- print ">>> Stdout (#%d):" % (count + 1)
+ print title % "Stdout"
print stdout
except device_errors.CommandTimeoutError:
print ">>> Test timed out after %ss." % runnable.timeout
@@ -606,7 +789,8 @@ def Main(args):
logging.getLogger().setLevel(logging.INFO)
parser = optparse.OptionParser()
parser.add_option("--android-build-tools",
- help="Path to chromium's build/android.")
+ help="Path to chromium's build/android. Specifying this "
+ "option will run tests using android platform.")
parser.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
@@ -617,10 +801,18 @@ def Main(args):
parser.add_option("--device",
help="The device ID to run Android tests on. If not given "
"it will be autodetected.")
+ parser.add_option("--extra-flags",
+ help="Additional flags to pass to the test executable",
+ default="")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
+ parser.add_option("--json-test-results-no-patch",
+ help="Path to a file for storing json results from run "
+ "without patch.")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
+ parser.add_option("--outdir-no-patch",
+ help="Base directory with compile output without patch")
(options, args) = parser.parse_args(args)
if len(args) == 0: # pragma: no cover
@@ -634,28 +826,35 @@ def Main(args):
print "Unknown architecture %s" % options.arch
return 1
- if (bool(options.arch.startswith("android")) !=
- bool(options.android_build_tools)): # pragma: no cover
- print ("Android architectures imply setting --android-build-tools and the "
- "other way around.")
+ if options.device and not options.android_build_tools: # pragma: no cover
+ print "Specifying a device requires Android build tools."
return 1
- if (options.device and not
- options.arch.startswith("android")): # pragma: no cover
- print "Specifying a device requires an Android architecture to be used."
+ if (options.json_test_results_no_patch and
+ not options.outdir_no_patch): # pragma: no cover
+ print("For writing json test results without patch, an outdir without "
+ "patch must be specified.")
return 1
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if options.buildbot:
- options.shell_dir = os.path.join(workspace, options.outdir, "Release")
+ build_config = "Release"
else:
- options.shell_dir = os.path.join(workspace, options.outdir,
- "%s.release" % options.arch)
+ build_config = "%s.release" % options.arch
+
+ options.shell_dir = os.path.join(workspace, options.outdir, build_config)
+
+ if options.outdir_no_patch:
+ options.shell_dir_no_patch = os.path.join(
+ workspace, options.outdir_no_patch, build_config)
+ else:
+ options.shell_dir_no_patch = None
platform = Platform.GetPlatform(options)
results = Results()
+ results_no_patch = Results()
for path in args:
path = os.path.abspath(path)
@@ -673,7 +872,7 @@ def Main(args):
platform.PreExecution()
# Build the graph/trace tree structure.
- root = BuildGraphs(suite, options.arch)
+ root = BuildGraphConfigs(suite, options.arch)
# Callback to be called on each node on traversal.
def NodeCB(node):
@@ -691,8 +890,10 @@ def Main(args):
yield platform.Run(runnable, i)
# Let runnable iterate over all runs and handle output.
- results += runnable.Run(Runner)
-
+ result, result_no_patch = runnable.Run(
+ Runner, trybot=options.shell_dir_no_patch)
+ results += result
+ results_no_patch += result_no_patch
platform.PostExecution()
if options.json_test_results:
@@ -700,6 +901,11 @@ def Main(args):
else: # pragma: no cover
print results
+ if options.json_test_results_no_patch:
+ results_no_patch.WriteToFile(options.json_test_results_no_patch)
+ else: # pragma: no cover
+ print results_no_patch
+
return min(1, len(results.errors))
if __name__ == "__main__": # pragma: no cover
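The hunks above split the shell-directory computation so that the regular build and the "no patch" baseline share one build-config suffix, and each feeds its own Results object. A minimal, self-contained sketch of that wiring; all paths and values below are illustrative, not taken from a real checkout:

import os

workspace = "/path/to/v8"                        # illustrative
outdir, outdir_no_patch = "out", "out-no-patch"  # illustrative
arch, buildbot = "x64", False

build_config = "Release" if buildbot else "%s.release" % arch
shell_dir = os.path.join(workspace, outdir, build_config)
shell_dir_no_patch = (
    os.path.join(workspace, outdir_no_patch, build_config)
    if outdir_no_patch else None)

print(shell_dir)           # /path/to/v8/out/x64.release
print(shell_dir_no_patch)  # /path/to/v8/out-no-patch/x64.release

When --outdir-no-patch is omitted, shell_dir_no_patch stays None, runnable.Run() receives trybot=None, and the no-patch side presumably has nothing to report.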
diff --git a/tools/shell-utils.h b/tools/shell-utils.h
index 7b51d2f5..bfd729d9 100644
--- a/tools/shell-utils.h
+++ b/tools/shell-utils.h
@@ -27,6 +27,8 @@
// Utility functions used by parser-shell.
+#include "src/globals.h"
+
#include <stdio.h>
namespace v8 {
@@ -44,7 +46,7 @@ const byte* ReadFileAndRepeat(const char* name, int* size, int repeat) {
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
- int file_size = ftell(file);
+ int file_size = static_cast<int>(ftell(file));
rewind(file);
*size = file_size * repeat;
@@ -64,4 +66,5 @@ const byte* ReadFileAndRepeat(const char* name, int* size, int repeat) {
return chars;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/tools/swarming_client b/tools/swarming_client
new file mode 160000
+Subproject df6e95e7669883c8fe9ef956c69a544154701a4
diff --git a/tools/test-push-to-trunk.sh b/tools/test-push-to-trunk.sh
deleted file mode 100755
index 6c201e46..00000000
--- a/tools/test-push-to-trunk.sh
+++ /dev/null
@@ -1,246 +0,0 @@
-#!/bin/bash
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Tests the push-to-trunk.sh script. Needs to be run in V8 base dir:
-# ./tools/test-push-to-trunk.sh
-
-# TODO(machenbach): Check automatically if expectations match.
-# TODO(machenbach): Mock out version number retrieval.
-# TODO(machenbach): Allow multiple different test cases.
-# TODO(machenbach): Allow multi line mock output.
-# TODO(machenbach): Represent test expectations/mock output without an array
-# index increment.
-
-########## Stdin for push-to-trunk.sh
-
-# Confirm push to trunk commit ID
-INPUT[0]="Y"
-# Open editor
-INPUT[1]=""
-# Confirm increment version number
-INPUT[2]="Y"
-# Reviewer for V8 CL
-INPUT[3]="reviewer@chromium.org"
-# Enter LGTM for V8 CL
-INPUT[4]="LGTM"
-# Confirm checkout sanity
-INPUT[5]="Y"
-# Manually type in trunk revision
-INPUT[6]="12345"
-# Reviewer for Chromium CL
-INPUT[7]="reviewer@chromium.org"
-
-########## Expected commands and mock output
-
-EXP[0]="git status -s -uno"
-OUT[0]=""
-EXP[1]="git status -s -b -uno"
-OUT[1]="## some_branch"
-EXP[2]="git svn fetch"
-OUT[2]=""
-EXP[3]="git branch"
-OUT[3]="not the temp branch"
-EXP[4]="git checkout -b prepare-push-temporary-branch-created-by-script"
-OUT[4]=""
-EXP[5]="git branch"
-OUT[5]="not the branch"
-EXP[6]="git branch"
-OUT[6]="not the trunk branch"
-EXP[7]="git checkout -b prepare-push svn/bleeding_edge"
-OUT[7]=""
-EXP[8]="git log -1 --format=%H ChangeLog"
-OUT[8]="hash1"
-EXP[9]="git log -1 hash1"
-OUT[9]=""
-EXP[10]="git log hash1..HEAD --format=%H"
-OUT[10]="hash2"
-EXP[11]="git log -1 hash2 --format=\"%w(80,8,8)%s\""
-OUT[11]="Log line..."
-EXP[12]="git log -1 hash2 --format=\"%B\""
-OUT[12]="BUG=6789"
-EXP[13]="git log -1 hash2 --format=\"%w(80,8,8)(%an)\""
-OUT[13]=" (author@chromium.org)"
-EXP[14]="git commit -a -m \"Prepare push to trunk. Now working on version 3.4.5.\""
-OUT[14]=""
-EXP[15]="git cl upload -r reviewer@chromium.org --send-mail"
-OUT[15]=""
-EXP[16]="git cl dcommit"
-OUT[16]=""
-EXP[17]="git svn fetch"
-OUT[17]=""
-EXP[18]="git checkout svn/bleeding_edge"
-OUT[18]=""
-EXP[19]="git log -1 --format=%H --grep=Prepare push to trunk. Now working on version 3.4.5."
-OUT[19]="hash3"
-EXP[20]="git diff svn/trunk"
-OUT[20]="patch1"
-EXP[21]="git checkout -b trunk-push svn/trunk"
-OUT[21]=""
-EXP[22]="git apply --index --reject /tmp/v8-push-to-trunk-tempfile-patch"
-OUT[22]=""
-EXP[23]="git add src/version.cc"
-OUT[23]=""
-EXP[24]="git commit -F /tmp/v8-push-to-trunk-tempfile-commitmsg"
-OUT[24]=""
-EXP[25]="git svn dcommit"
-OUT[25]="r1234"
-EXP[26]="git svn tag 3.4.5 -m \"Tagging version 3.4.5\""
-OUT[26]=""
-EXP[27]="git status -s -uno"
-OUT[27]=""
-EXP[28]="git checkout master"
-OUT[28]=""
-EXP[29]="git pull"
-OUT[29]=""
-EXP[30]="git checkout -b v8-roll-12345"
-OUT[30]=""
-EXP[31]="git commit -am Update V8 to version 3.4.5."
-OUT[31]=""
-EXP[32]="git cl upload --send-mail"
-OUT[32]=""
-EXP[33]="git checkout -f some_branch"
-OUT[33]=""
-EXP[34]="git branch -D prepare-push-temporary-branch-created-by-script"
-OUT[34]=""
-EXP[35]="git branch -D prepare-push"
-OUT[35]=""
-EXP[36]="git branch -D trunk-push"
-OUT[36]=""
-
-########## Global temp files for test input/output
-
-export TEST_OUTPUT=$(mktemp)
-export INDEX=$(mktemp)
-export MOCK_OUTPUT=$(mktemp)
-export EXPECTED_COMMANDS=$(mktemp)
-
-########## Command index
-
-inc_index() {
- local I="$(command cat $INDEX)"
- let "I+=1"
- echo "$I" > $INDEX
- echo $I
-}
-
-echo "-1" > $INDEX
-export -f inc_index
-
-########## Mock output accessor
-
-get_mock_output() {
- local I=$1
- let "I+=1"
- command sed "${I}q;d" $MOCK_OUTPUT
-}
-
-export -f get_mock_output
-
-for E in "${OUT[@]}"; do
- echo $E
-done > $MOCK_OUTPUT
-
-########## Expected commands accessor
-
-get_expected_command() {
- local I=$1
- let "I+=1"
- command sed "${I}q;d" $EXPECTED_COMMANDS
-}
-
-export -f get_expected_command
-
-for E in "${EXP[@]}"; do
- echo $E
-done > $EXPECTED_COMMANDS
-
-########## Mock commands
-
-git() {
- # All calls to git are mocked out. Expected calls and mock output are stored
- # in the EXP/OUT arrays above.
- local I=$(inc_index)
- local OUT=$(get_mock_output $I)
- local EXP=$(get_expected_command $I)
- echo "#############################" >> $TEST_OUTPUT
- echo "Com. Index: $I" >> $TEST_OUTPUT
- echo "Expected: ${EXP}" >> $TEST_OUTPUT
- echo "Actual: git $@" >> $TEST_OUTPUT
- echo "Mock Output: ${OUT}" >> $TEST_OUTPUT
- echo "${OUT}"
-}
-
-mv() {
- echo "#############################" >> $TEST_OUTPUT
- echo "mv $@" >> $TEST_OUTPUT
-}
-
-sed() {
- # Only calls to sed * -i * are mocked out.
- echo "#############################" >> $TEST_OUTPUT
- local arr=$@
- if [[ "${arr[@]}" =~ "-i" || "${arr[${#arr[@]}-1]}" == "-i" ]]; then
- echo "sed $@" >> $TEST_OUTPUT
- else
- echo "sed $@" >> $TEST_OUTPUT
- command sed "$@"
- fi
-}
-
-editor() {
- echo "#############################" >> $TEST_OUTPUT
- echo "editor $@" >> $TEST_OUTPUT
-}
-
-cd() {
- echo "#############################" >> $TEST_OUTPUT
- echo "cd $@" >> $TEST_OUTPUT
-}
-
-export -f git
-export -f mv
-export -f sed
-export -f cd
-export -f editor
-export EDITOR=editor
-
-########## Invoke script with test stdin
-
-for i in "${INPUT[@]}"; do
- echo $i
-done | tools/push-to-trunk.sh -c "path/to/chromium"
-
-echo "Collected output:"
-command cat $TEST_OUTPUT
-
-########## Clean up
-
-rm -rf $TEST_OUTPUT
-rm -rf $INDEX
-rm -rf $MOCK_OUTPUT
-rm -rf $EXPECTED_COMMANDS
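The deleted harness mocked every git call through the parallel EXP/OUT tables above, advanced by a shared index across subshells. The same idea is easier to state in Python; the sketch below is a hedged restatement with illustrative names, not code from the current test suite:

class CommandMock(object):
  """Pairs expected commands with canned output, advancing one index."""
  def __init__(self, expectations):
    # expectations: list of (expected_command, canned_output) pairs.
    self.expectations = expectations
    self.index = -1
    self.log = []

  def __call__(self, command):
    self.index += 1
    expected, output = self.expectations[self.index]
    self.log.append("Expected: %s | Actual: %s" % (expected, command))
    return output

git = CommandMock([
    ("git status -s -uno", ""),
    ("git status -s -b -uno", "## some_branch"),
])
print(git("git status -s -uno"))     # ""
print(git("git status -s -b -uno"))  # "## some_branch"

Like the shell version, this only records expectations next to actual calls; checking them automatically was still an open TODO when the script was removed.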
diff --git a/tools/testrunner/local/commands.py b/tools/testrunner/local/commands.py
index d6445d0c..a4df32c5 100644
--- a/tools/testrunner/local/commands.py
+++ b/tools/testrunner/local/commands.py
@@ -26,28 +26,14 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import os
-import signal
import subprocess
import sys
-import tempfile
-import time
+from threading import Timer
from ..local import utils
from ..objects import output
-def KillProcessWithID(pid):
- if utils.IsWindows():
- os.popen('taskkill /T /F /PID %d' % pid)
- else:
- os.kill(pid, signal.SIGTERM)
-
-
-MAX_SLEEP_TIME = 0.1
-INITIAL_SLEEP_TIME = 0.0001
-SLEEP_TIME_FACTOR = 1.25
-
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
@@ -75,77 +61,60 @@ def RunProcess(verbose, timeout, args, **rest):
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
- process = subprocess.Popen(
- shell=utils.IsWindows(),
- args=popen_args,
- **rest
- )
+
+ try:
+ process = subprocess.Popen(
+ args=popen_args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ **rest
+ )
+ except Exception as e:
+ sys.stderr.write("Error executing: %s\n" % popen_args)
+ raise e
+
if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
Win32SetErrorMode(prev_error_mode)
- # Compute the end time - if the process crosses this limit we
- # consider it timed out.
- if timeout is None: end_time = None
- else: end_time = time.time() + timeout
- timed_out = False
- # Repeatedly check the exit code from the process in a
- # loop and keep track of whether or not it times out.
- exit_code = None
- sleep_time = INITIAL_SLEEP_TIME
- while exit_code is None:
- if (not end_time is None) and (time.time() >= end_time):
- # Kill the process and wait for it to exit.
- KillProcessWithID(process.pid)
- exit_code = process.wait()
- timed_out = True
- else:
- exit_code = process.poll()
- time.sleep(sleep_time)
- sleep_time = sleep_time * SLEEP_TIME_FACTOR
- if sleep_time > MAX_SLEEP_TIME:
- sleep_time = MAX_SLEEP_TIME
- return (exit_code, timed_out)
-
-
-def PrintError(string):
- sys.stderr.write(string)
- sys.stderr.write("\n")
-
-
-def CheckedUnlink(name):
- # On Windows, when run with -jN in parallel processes,
- # OS often fails to unlink the temp file. Not sure why.
- # Need to retry.
- # Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
- retry_count = 0
- while retry_count < 30:
+
+ def kill_process(process, timeout_result):
+ timeout_result[0] = True
try:
- os.unlink(name)
- return
- except OSError, e:
- retry_count += 1
- time.sleep(retry_count * 0.1)
- PrintError("os.unlink() " + str(e))
+ if utils.IsWindows():
+ if verbose:
+ print "Attempting to kill process %d" % process.pid
+ sys.stdout.flush()
+ tk = subprocess.Popen(
+ 'taskkill /T /F /PID %d' % process.pid,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = tk.communicate()
+ if verbose:
+ print "Taskkill results for %d" % process.pid
+ print stdout
+ print stderr
+ print "Return code: %d" % tk.returncode
+ sys.stdout.flush()
+ else:
+ process.kill()
+ except OSError:
+ sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+
+ # Pseudo object to communicate with timer thread.
+ timeout_result = [False]
+
+ timer = Timer(timeout, kill_process, [process, timeout_result])
+ timer.start()
+ stdout, stderr = process.communicate()
+ timer.cancel()
+ return process.returncode, timeout_result[0], stdout, stderr
def Execute(args, verbose=False, timeout=None):
- try:
- args = [ c for c in args if c != "" ]
- (fd_out, outname) = tempfile.mkstemp()
- (fd_err, errname) = tempfile.mkstemp()
- (exit_code, timed_out) = RunProcess(
- verbose,
- timeout,
- args=args,
- stdout=fd_out,
- stderr=fd_err
- )
- finally:
- # TODO(machenbach): A keyboard interrupt before the assignment to
- # fd_out|err can lead to reference errors here.
- os.close(fd_out)
- os.close(fd_err)
- out = file(outname).read()
- errors = file(errname).read()
- CheckedUnlink(outname)
- CheckedUnlink(errname)
- return output.Output(exit_code, timed_out, out, errors)
+ args = [ c for c in args if c != "" ]
+ exit_code, timed_out, stdout, stderr = RunProcess(
+ verbose,
+ timeout,
+ args=args,
+ )
+ return output.Output(exit_code, timed_out, stdout, stderr)
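The rewritten RunProcess above replaces exit-code polling with a threading.Timer that kills the process when the timeout fires, while communicate() blocks on the pipes. A minimal, self-contained sketch of just that pattern, leaving out the Windows taskkill branch and the verbose logging, and assuming timeout is a number:

import subprocess
from threading import Timer

def run_with_timeout(args, timeout):
  process = subprocess.Popen(
      args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  timed_out = [False]  # Mutable cell shared with the timer thread.

  def kill():
    timed_out[0] = True
    try:
      process.kill()
    except OSError:
      pass  # Process ended between the timeout firing and the kill.

  timer = Timer(timeout, kill)
  timer.start()
  stdout, stderr = process.communicate()  # Blocks until exit or kill.
  timer.cancel()  # No-op if the timer already fired.
  return process.returncode, timed_out[0], stdout, stderr

print(run_with_timeout(["sleep", "10"], 0.5))  # POSIX; reports timed_out=True

Reading output through communicate() is also what lets the new Execute() drop the temp-file bookkeeping (CheckedUnlink and friends) of the old implementation.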
diff --git a/tools/testrunner/local/execution.py b/tools/testrunner/local/execution.py
index 5c5fbac9..c9fe5417 100644
--- a/tools/testrunner/local/execution.py
+++ b/tools/testrunner/local/execution.py
@@ -26,18 +26,27 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import collections
import os
import shutil
+import sys
import time
from pool import Pool
from . import commands
from . import perfdata
from . import statusfile
+from . import testsuite
from . import utils
-class Job(object):
+# Base dir of the v8 checkout.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__)))))
+TEST_DIR = os.path.join(BASE_DIR, "test")
+
+
+class Instructions(object):
def __init__(self, command, dep_command, test_id, timeout, verbose):
self.command = command
self.dep_command = dep_command
@@ -46,24 +55,119 @@ class Job(object):
self.verbose = verbose
-def RunTest(job):
- start_time = time.time()
- if job.dep_command is not None:
- dep_output = commands.Execute(job.dep_command, job.verbose, job.timeout)
- # TODO(jkummerow): We approximate the test suite specific function
- # IsFailureOutput() by just checking the exit code here. Currently
- # only cctests define dependencies, for which this simplification is
- # correct.
- if dep_output.exit_code != 0:
- return (job.id, dep_output, time.time() - start_time)
- output = commands.Execute(job.command, job.verbose, job.timeout)
- return (job.id, output, time.time() - start_time)
+# Structure that keeps global information per worker process.
+ProcessContext = collections.namedtuple(
+ "process_context", ["suites", "context"])
+
+
+def MakeProcessContext(context):
+ """Generate a process-local context.
+
+ This reloads all suites per process and stores the global context.
+
+ Args:
+ context: The global context from the test runner.
+ """
+ suite_paths = utils.GetSuitePaths(TEST_DIR)
+ suites = {}
+ for root in suite_paths:
+ # Don't reinitialize global state as this is concurrently called from
+ # different processes.
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(TEST_DIR, root), global_init=False)
+ if suite:
+ suites[suite.name] = suite
+ return ProcessContext(suites, context)
+
+
+def GetCommand(test, context):
+ d8testflag = []
+ shell = test.suite.shell()
+ if shell == "d8":
+ d8testflag = ["--test"]
+ if utils.IsWindows():
+ shell += ".exe"
+ if context.random_seed:
+ d8testflag += ["--random-seed=%s" % context.random_seed]
+ cmd = (context.command_prefix +
+ [os.path.abspath(os.path.join(context.shell_dir, shell))] +
+ d8testflag +
+ test.suite.GetFlagsForTestCase(test, context) +
+ context.extra_flags)
+ return cmd
+
+
+def _GetInstructions(test, context):
+ command = GetCommand(test, context)
+ timeout = context.timeout
+ if ("--stress-opt" in test.flags or
+ "--stress-opt" in context.mode_flags or
+ "--stress-opt" in context.extra_flags):
+ timeout *= 4
+ if "--noenable-vfp3" in context.extra_flags:
+ timeout *= 2
+ # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
+ # the like.
+ if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
+ timeout *= 2
+ if test.dependency is not None:
+ dep_command = [ c.replace(test.path, test.dependency) for c in command ]
+ else:
+ dep_command = None
+ return Instructions(
+ command, dep_command, test.id, timeout, context.verbose)
+
+
+class Job(object):
+ """Stores data to be sent over the multi-process boundary.
+
+ All contained fields will be pickled/unpickled.
+ """
+
+ def Run(self, process_context):
+ """Executes the job.
+
+ Args:
+ process_context: Process-local information that is initialized by the
+ executing worker.
+ """
+ raise NotImplementedError()
+
+
+class TestJob(Job):
+ def __init__(self, test):
+ self.test = test
+
+ def Run(self, process_context):
+ # Retrieve a new suite object on the worker-process side. The original
+ # suite object isn't pickled.
+ self.test.SetSuiteObject(process_context.suites)
+ instr = _GetInstructions(self.test, process_context.context)
+
+ start_time = time.time()
+ if instr.dep_command is not None:
+ dep_output = commands.Execute(
+ instr.dep_command, instr.verbose, instr.timeout)
+ # TODO(jkummerow): We approximate the test suite specific function
+ # IsFailureOutput() by just checking the exit code here. Currently
+ # only cctests define dependencies, for which this simplification is
+ # correct.
+ if dep_output.exit_code != 0:
+ return (instr.id, dep_output, time.time() - start_time)
+ output = commands.Execute(instr.command, instr.verbose, instr.timeout)
+ return (instr.id, output, time.time() - start_time)
+
+
+def RunTest(job, process_context):
+ return job.Run(process_context)
+
class Runner(object):
def __init__(self, suites, progress_indicator, context):
self.datapath = os.path.join("out", "testrunner_data")
- self.perf_data_manager = perfdata.PerfDataManager(self.datapath)
+ self.perf_data_manager = perfdata.GetPerfDataManager(
+ context, self.datapath)
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
self.perf_failures = False
self.printed_allocations = False
@@ -71,16 +175,22 @@ class Runner(object):
if not context.no_sorting:
for t in self.tests:
t.duration = self.perfdata.FetchPerfData(t) or 1.0
+ slow_key = lambda t: statusfile.IsSlow(t.outcomes)
+ self.tests.sort(key=slow_key, reverse=True)
self.tests.sort(key=lambda t: t.duration, reverse=True)
- self._CommonInit(len(self.tests), progress_indicator, context)
+ self._CommonInit(suites, progress_indicator, context)
- def _CommonInit(self, num_tests, progress_indicator, context):
+ def _CommonInit(self, suites, progress_indicator, context):
+ self.total = 0
+ for s in suites:
+ for t in s.tests:
+ t.id = self.total
+ self.total += 1
self.indicator = progress_indicator
- progress_indicator.runner = self
+ progress_indicator.SetRunner(self)
self.context = context
self.succeeded = 0
- self.total = num_tests
- self.remaining = num_tests
+ self.remaining = self.total
self.failed = []
self.crashed = 0
self.reran_tests = 0
@@ -92,23 +202,6 @@ class Runner(object):
print("PerfData exception: %s" % e)
self.perf_failures = True
- def _GetJob(self, test):
- command = self.GetCommand(test)
- timeout = self.context.timeout
- if ("--stress-opt" in test.flags or
- "--stress-opt" in self.context.mode_flags or
- "--stress-opt" in self.context.extra_flags):
- timeout *= 4
- # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
- # the like.
- if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
- timeout *= 2
- if test.dependency is not None:
- dep_command = [ c.replace(test.path, test.dependency) for c in command ]
- else:
- dep_command = None
- return Job(command, dep_command, test.id, timeout, self.context.verbose)
-
def _MaybeRerun(self, pool, test):
if test.run <= self.context.rerun_failures_count:
# Possibly rerun this test if its run count is below the maximum per
@@ -129,8 +222,9 @@ class Runner(object):
test.duration = None
test.output = None
test.run += 1
- pool.add([self._GetJob(test)])
+ pool.add([TestJob(test)])
self.remaining += 1
+ self.total += 1
def _ProcessTestNormal(self, test, result, pool):
self.indicator.AboutToRun(test)
@@ -150,6 +244,7 @@ class Runner(object):
self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
if has_unexpected_output:
# Rerun test failures after the indicator has processed the results.
+ self._VerbosePrint("Attempting to rerun test after failure.")
self._MaybeRerun(pool, test)
# Update the perf database if the test succeeded.
return not has_unexpected_output
@@ -197,7 +292,7 @@ class Runner(object):
# remember the output for comparison.
test.run += 1
test.output = result[1]
- pool.add([self._GetJob(test)])
+ pool.add([TestJob(test)])
# Always update the perf database.
return True
@@ -205,66 +300,70 @@ class Runner(object):
self.indicator.Starting()
self._RunInternal(jobs)
self.indicator.Done()
- if self.failed or self.remaining:
+ if self.failed:
return 1
+ elif self.remaining:
+ return 2
return 0
def _RunInternal(self, jobs):
pool = Pool(jobs)
test_map = {}
- # TODO(machenbach): Instead of filling the queue completely before
- # pool.imap_unordered, make this a generator that already starts testing
- # while the queue is filled.
- queue = []
- queued_exception = None
- for test in self.tests:
- assert test.id >= 0
- test_map[test.id] = test
- try:
- queue.append([self._GetJob(test)])
- except Exception, e:
- # If this failed, save the exception and re-raise it later (after
- # all other tests have had a chance to run).
- queued_exception = e
- continue
+ queued_exception = [None]
+ def gen_tests():
+ for test in self.tests:
+ assert test.id >= 0
+ test_map[test.id] = test
+ try:
+ yield [TestJob(test)]
+ except Exception, e:
+ # If this failed, save the exception and re-raise it later (after
+ # all other tests have had a chance to run).
+ queued_exception[0] = e
+ continue
try:
- it = pool.imap_unordered(RunTest, queue)
+ it = pool.imap_unordered(
+ fn=RunTest,
+ gen=gen_tests(),
+ process_context_fn=MakeProcessContext,
+ process_context_args=[self.context],
+ )
for result in it:
- test = test_map[result[0]]
+ if result.heartbeat:
+ self.indicator.Heartbeat()
+ continue
+ test = test_map[result.value[0]]
if self.context.predictable:
- update_perf = self._ProcessTestPredictable(test, result, pool)
+ update_perf = self._ProcessTestPredictable(test, result.value, pool)
else:
- update_perf = self._ProcessTestNormal(test, result, pool)
+ update_perf = self._ProcessTestNormal(test, result.value, pool)
if update_perf:
self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
finally:
+ self._VerbosePrint("Closing process pool.")
pool.terminate()
+ self._VerbosePrint("Closing database connection.")
self._RunPerfSafe(lambda: self.perf_data_manager.close())
if self.perf_failures:
# Nuke perf data in case of failures. This might not work on windows as
# some files might still be open.
print "Deleting perf test data due to db corruption."
shutil.rmtree(self.datapath)
- if queued_exception:
- raise queued_exception
-
- # Make sure that any allocations were printed in predictable mode.
- assert not self.context.predictable or self.printed_allocations
-
- def GetCommand(self, test):
- d8testflag = []
- shell = test.suite.shell()
- if shell == "d8":
- d8testflag = ["--test"]
- if utils.IsWindows():
- shell += ".exe"
- cmd = (self.context.command_prefix +
- [os.path.abspath(os.path.join(self.context.shell_dir, shell))] +
- d8testflag +
- ["--random-seed=%s" % self.context.random_seed] +
- test.suite.GetFlagsForTestCase(test, self.context) +
- self.context.extra_flags)
- return cmd
+ if queued_exception[0]:
+ raise queued_exception[0]
+
+ # Make sure that any allocations were printed in predictable mode (if we
+ # ran any tests).
+ assert (
+ not self.total or
+ not self.context.predictable or
+ self.printed_allocations
+ )
+
+ def _VerbosePrint(self, text):
+ if self.context.verbose:
+ print text
+ sys.stdout.flush()
class BreakNowException(Exception):
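TestJob above exists because suite objects cannot cross the multiprocessing boundary: only plain, picklable fields are shipped, and each worker re-attaches its own suite via the ProcessContext built by MakeProcessContext. A hedged, self-contained sketch of the shape of that split; EchoJob and RunJob are illustrative stand-ins, not the real classes:

import collections

ProcessContext = collections.namedtuple(
    "process_context", ["suites", "context"])

class EchoJob(object):
  """All fields are plain data; heavy state is looked up on the worker
  side through the process context."""
  def __init__(self, suite_name, test_path):
    self.suite_name = suite_name
    self.test_path = test_path

  def Run(self, process_context):
    suite = process_context.suites[self.suite_name]  # worker-local
    return "%s: %s" % (suite, self.test_path)

def RunJob(job, process_context):
  # Trampoline handed to the pool, mirroring RunTest above.
  return job.Run(process_context)

job = EchoJob("mjsunit", "div-mod")
ctx = ProcessContext(suites={"mjsunit": "suite-object"}, context=None)
print(RunJob(job, ctx))  # suite-object: div-mod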
diff --git a/tools/testrunner/local/perfdata.py b/tools/testrunner/local/perfdata.py
index 2979dc48..29ebff77 100644
--- a/tools/testrunner/local/perfdata.py
+++ b/tools/testrunner/local/perfdata.py
@@ -118,3 +118,29 @@ class PerfDataManager(object):
if not mode in modes:
modes[mode] = PerfDataStore(self.datadir, arch, mode)
return modes[mode]
+
+
+class NullPerfDataStore(object):
+ def UpdatePerfData(self, test):
+ pass
+
+ def FetchPerfData(self, test):
+ return None
+
+
+class NullPerfDataManager(object):
+ def __init__(self):
+ pass
+
+ def GetStore(self, *args, **kwargs):
+ return NullPerfDataStore()
+
+ def close(self):
+ pass
+
+
+def GetPerfDataManager(context, datadir):
+ if context.use_perf_data:
+ return PerfDataManager(datadir)
+ else:
+ return NullPerfDataManager()
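GetPerfDataManager above is a textbook Null Object: when perf data is disabled, callers receive an object with the identical interface whose methods do nothing, so call sites never branch. A small usage sketch; the import path and FakeContext are assumptions for illustration:

from testrunner.local import perfdata  # import path assumed

class FakeContext(object):
  use_perf_data = False  # Flip to True for the real on-disk store.

test = object()  # Any TestCase-like object; the null store ignores it.
manager = perfdata.GetPerfDataManager(FakeContext(), "out/testrunner_data")
store = manager.GetStore("x64", "release")
duration = store.FetchPerfData(test) or 1.0  # Null store returns None.
store.UpdatePerfData(test)                   # No-op when disabled.
manager.close()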
diff --git a/tools/testrunner/local/pool.py b/tools/testrunner/local/pool.py
index 602a2d4b..6d123fd4 100644
--- a/tools/testrunner/local/pool.py
+++ b/tools/testrunner/local/pool.py
@@ -3,7 +3,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+from Queue import Empty
from multiprocessing import Event, Process, Queue
+import traceback
+
class NormalResult():
def __init__(self, result):
@@ -24,17 +27,36 @@ class BreakResult():
self.break_now = True
-def Worker(fn, work_queue, done_queue, done):
+class MaybeResult():
+ def __init__(self, heartbeat, value):
+ self.heartbeat = heartbeat
+ self.value = value
+
+ @staticmethod
+ def create_heartbeat():
+ return MaybeResult(True, None)
+
+ @staticmethod
+ def create_result(value):
+ return MaybeResult(False, value)
+
+
+def Worker(fn, work_queue, done_queue, done,
+ process_context_fn=None, process_context_args=None):
"""Worker to be run in a child process.
The worker stops on two conditions. 1. When the poison pill "STOP" is
reached or 2. when the event "done" is set."""
try:
+ kwargs = {}
+ if process_context_fn and process_context_args is not None:
+ kwargs.update(process_context=process_context_fn(*process_context_args))
for args in iter(work_queue.get, "STOP"):
if done.is_set():
break
try:
- done_queue.put(NormalResult(fn(*args)))
+ done_queue.put(NormalResult(fn(*args, **kwargs)))
except Exception, e:
+ traceback.print_exc()
print(">>> EXCEPTION: %s" % e)
done_queue.put(ExceptionResult())
except KeyboardInterrupt:
@@ -51,7 +73,7 @@ class Pool():
# Necessary to not overflow the queue's pipe if a keyboard interrupt happens.
BUFFER_FACTOR = 4
- def __init__(self, num_workers):
+ def __init__(self, num_workers, heartbeat_timeout=30):
self.num_workers = num_workers
self.processes = []
self.terminated = False
@@ -67,11 +89,25 @@ class Pool():
self.work_queue = Queue()
self.done_queue = Queue()
self.done = Event()
+ self.heartbeat_timeout = heartbeat_timeout
- def imap_unordered(self, fn, gen):
+ def imap_unordered(self, fn, gen,
+ process_context_fn=None, process_context_args=None):
"""Maps function "fn" to items in generator "gen" on the worker processes
in an arbitrary order. The items are expected to be lists of arguments to
- the function. Returns a results iterator."""
+ the function. Returns a results iterator. A result value of type
+    MaybeResult either indicates a heartbeat of the runner, i.e. that the
+    runner is still waiting for the result to be computed, or it wraps
+ the real result.
+
+ Args:
+ process_context_fn: Function executed once by each worker. Expected to
+ return a process-context object. If present, this object is passed
+ as additional argument to each call to fn.
+ process_context_args: List of arguments for the invocation of
+ process_context_fn. All arguments will be pickled and sent beyond the
+ process boundary.
+ """
try:
gen = iter(gen)
self.advance = self._advance_more
@@ -80,13 +116,22 @@ class Pool():
p = Process(target=Worker, args=(fn,
self.work_queue,
self.done_queue,
- self.done))
+ self.done,
+ process_context_fn,
+ process_context_args))
self.processes.append(p)
p.start()
self.advance(gen)
while self.count > 0:
- result = self.done_queue.get()
+ while True:
+ try:
+ result = self.done_queue.get(timeout=self.heartbeat_timeout)
+ break
+ except Empty:
+ # Indicate a heartbeat. The iterator will continue fetching the
+ # next result.
+ yield MaybeResult.create_heartbeat()
self.count -= 1
if result.exception:
# Ignore items with unexpected exceptions.
@@ -95,7 +140,7 @@ class Pool():
# A keyboard interrupt happened in one of the worker processes.
raise KeyboardInterrupt
else:
- yield result.result
+ yield MaybeResult.create_result(result.result)
self.advance(gen)
finally:
self.terminate()
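On the consumer side, the heartbeat support changes imap_unordered's contract: every yielded item is a MaybeResult, and heartbeats, emitted whenever done_queue.get() times out, must be filtered before touching .value. A small usage sketch mirroring pool_unittest.py below; the import path is assumed and square is a stand-in work function:

from testrunner.local.pool import Pool  # import path assumed

def square(n):  # Must be a picklable, top-level function.
  return n * n

pool = Pool(num_workers=4, heartbeat_timeout=30)
for maybe_result in pool.imap_unordered(square, [[n] for n in range(10)]):
  if maybe_result.heartbeat:
    print("Still working...")  # Progress signal; no result yet.
    continue
  print(maybe_result.value)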
diff --git a/tools/testrunner/local/pool_unittest.py b/tools/testrunner/local/pool_unittest.py
index bf2b3f85..335d20a6 100644
--- a/tools/testrunner/local/pool_unittest.py
+++ b/tools/testrunner/local/pool_unittest.py
@@ -17,7 +17,7 @@ class PoolTest(unittest.TestCase):
results = set()
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
- results.add(result)
+ results.add(result.value)
self.assertEquals(set(range(0, 10)), results)
def testException(self):
@@ -25,7 +25,7 @@ class PoolTest(unittest.TestCase):
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
# Item 10 will not appear in results due to an internal exception.
- results.add(result)
+ results.add(result.value)
expect = set(range(0, 12))
expect.remove(10)
self.assertEquals(expect, results)
@@ -34,8 +34,8 @@ class PoolTest(unittest.TestCase):
results = set()
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
- results.add(result)
- if result < 30:
- pool.add([result + 20])
+ results.add(result.value)
+ if result.value < 30:
+ pool.add([result.value + 20])
self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
results)
diff --git a/tools/testrunner/local/progress.py b/tools/testrunner/local/progress.py
index 2616958c..4e1be3e4 100644
--- a/tools/testrunner/local/progress.py
+++ b/tools/testrunner/local/progress.py
@@ -26,34 +26,27 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from functools import wraps
import json
import os
import sys
import time
+from . import execution
from . import junit_output
ABS_PATH_PREFIX = os.getcwd() + os.sep
-def EscapeCommand(command):
- parts = []
- for part in command:
- if ' ' in part:
- # Escape spaces. We may need to escape more characters for this
- # to work properly.
- parts.append('"%s"' % part)
- else:
- parts.append(part)
- return " ".join(parts)
-
-
class ProgressIndicator(object):
def __init__(self):
self.runner = None
+ def SetRunner(self, runner):
+ self.runner = runner
+
def Starting(self):
pass
@@ -66,6 +59,9 @@ class ProgressIndicator(object):
def HasRun(self, test, has_unexpected_output):
pass
+ def Heartbeat(self):
+ pass
+
def PrintFailureHeader(self, test):
if test.suite.IsNegativeTest(test):
negative_marker = '[negative] '
@@ -76,6 +72,42 @@ class ProgressIndicator(object):
'negative': negative_marker
}
+ def _EscapeCommand(self, test):
+ command = execution.GetCommand(test, self.runner.context)
+ parts = []
+ for part in command:
+ if ' ' in part:
+ # Escape spaces. We may need to escape more characters for this
+ # to work properly.
+ parts.append('"%s"' % part)
+ else:
+ parts.append(part)
+ return " ".join(parts)
+
+
+class IndicatorNotifier(object):
+ """Holds a list of progress indicators and notifies them all on events."""
+ def __init__(self):
+ self.indicators = []
+
+ def Register(self, indicator):
+ self.indicators.append(indicator)
+
+
+# Forge all generic event-dispatching methods in IndicatorNotifier, which are
+# part of the ProgressIndicator interface.
+for func_name in ProgressIndicator.__dict__:
+ func = getattr(ProgressIndicator, func_name)
+ if callable(func) and not func.__name__.startswith('_'):
+ def wrap_functor(f):
+ @wraps(f)
+ def functor(self, *args, **kwargs):
+ """Generic event dispatcher."""
+ for indicator in self.indicators:
+ getattr(indicator, f.__name__)(*args, **kwargs)
+ return functor
+ setattr(IndicatorNotifier, func_name, wrap_functor(func))
+
class SimpleProgressIndicator(ProgressIndicator):
"""Abstract base class for {Verbose,Dots}ProgressIndicator"""
@@ -93,7 +125,7 @@ class SimpleProgressIndicator(ProgressIndicator):
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
- print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
+ print "Command: %s" % self._EscapeCommand(failed)
if failed.output.HasCrashed():
print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
@@ -127,6 +159,11 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
else:
outcome = 'pass'
print 'Done running %s: %s' % (test.GetLabel(), outcome)
+ sys.stdout.flush()
+
+ def Heartbeat(self):
+ print 'Still working...'
+ sys.stdout.flush()
class DotsProgressIndicator(SimpleProgressIndicator):
@@ -176,7 +213,7 @@ class CompactProgressIndicator(ProgressIndicator):
stderr = test.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
- print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+ print "Command: %s" % self._EscapeCommand(test)
if test.output.HasCrashed():
print "exit code: %d" % test.output.exit_code
print "--- CRASHED ---"
@@ -192,10 +229,12 @@ class CompactProgressIndicator(ProgressIndicator):
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
+ progress = 0 if not self.runner.total else (
+ ((self.runner.total - self.runner.remaining) * 100) //
+ self.runner.total)
status = self.templates['status_line'] % {
'passed': self.runner.succeeded,
- 'remaining': (((self.runner.total - self.runner.remaining) * 100) //
- self.runner.total),
+ 'progress': progress,
'failed': len(self.runner.failed),
'test': name,
'mins': int(elapsed) / 60,
@@ -212,7 +251,7 @@ class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
- "\033[34m%%%(remaining) 4d\033[0m|"
+ "\033[34m%%%(progress) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
@@ -228,7 +267,7 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
- 'status_line': ("[%(mins)02i:%(secs)02i|%%%(remaining) 4d|"
+ 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
@@ -241,29 +280,19 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, progress_indicator, junitout, junittestsuite):
- self.progress_indicator = progress_indicator
+ def __init__(self, junitout, junittestsuite):
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
else:
self.outfile = sys.stdout
- def Starting(self):
- self.progress_indicator.runner = self.runner
- self.progress_indicator.Starting()
-
def Done(self):
- self.progress_indicator.Done()
self.outputter.FinishAndWrite(self.outfile)
if self.outfile != sys.stdout:
self.outfile.close()
- def AboutToRun(self, test):
- self.progress_indicator.AboutToRun(test)
-
def HasRun(self, test, has_unexpected_output):
- self.progress_indicator.HasRun(test, has_unexpected_output)
fail_text = ""
if has_unexpected_output:
stdout = test.output.stdout.strip()
@@ -272,7 +301,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
stderr = test.output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+ fail_text += "Command: %s" % self._EscapeCommand(test)
if test.output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
if test.output.HasTimedOut():
@@ -285,39 +314,46 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator):
- def __init__(self, progress_indicator, json_test_results, arch, mode):
- self.progress_indicator = progress_indicator
+ def __init__(self, json_test_results, arch, mode, random_seed):
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
+ self.random_seed = random_seed
self.results = []
-
- def Starting(self):
- self.progress_indicator.runner = self.runner
- self.progress_indicator.Starting()
+ self.tests = []
def Done(self):
- self.progress_indicator.Done()
complete_results = []
if os.path.exists(self.json_test_results):
with open(self.json_test_results, "r") as f:
# Buildbot might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
+ # Sort tests by duration.
+ timed_tests = [t for t in self.tests if t.duration is not None]
+ timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
+ slowest_tests = [
+ {
+ "name": test.GetLabel(),
+ "flags": test.flags,
+ "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
+ "duration": test.duration,
+ } for test in timed_tests[:20]
+ ]
+
complete_results.append({
"arch": self.arch,
"mode": self.mode,
"results": self.results,
+ "slowest_tests": slowest_tests,
})
with open(self.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
- def AboutToRun(self, test):
- self.progress_indicator.AboutToRun(test)
-
def HasRun(self, test, has_unexpected_output):
- self.progress_indicator.HasRun(test, has_unexpected_output)
+ # Buffer all tests for sorting the durations in the end.
+ self.tests.append(test)
if not has_unexpected_output:
# Omit tests that run as expected. Passing tests of reruns after failures
      # will have unexpected_output reported here as well.
@@ -326,14 +362,20 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.results.append({
"name": test.GetLabel(),
"flags": test.flags,
- "command": EscapeCommand(self.runner.GetCommand(test)).replace(
- ABS_PATH_PREFIX, ""),
+ "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"run": test.run,
"stdout": test.output.stdout,
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
"expected": list(test.outcomes or ["PASS"]),
+ "duration": test.duration,
+
+ # TODO(machenbach): This stores only the global random seed from the
+ # context and not possible overrides when using random-seed stress.
+ "random_seed": self.random_seed,
+ "target_name": test.suite.shell(),
+ "variant": test.variant,
})
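One subtlety in the IndicatorNotifier hunk above: the fan-out methods are forged at import time, and the wrap_functor factory is essential, because a closure written directly in the loop body would capture the loop variable and dispatch every event to the last method visited. A self-contained reproduction of the pattern with illustrative stand-in classes:

from functools import wraps

class Base(object):
  def Starting(self): pass
  def Done(self): pass

class Notifier(object):
  def __init__(self):
    self.indicators = []
  def Register(self, indicator):
    self.indicators.append(indicator)

for name in list(Base.__dict__):
  func = getattr(Base, name)
  if callable(func) and not name.startswith('_'):
    def make_dispatcher(f):  # Pins down f per iteration (wrap_functor).
      @wraps(f)
      def dispatcher(self, *args, **kwargs):
        for indicator in self.indicators:
          getattr(indicator, f.__name__)(*args, **kwargs)
      return dispatcher
    setattr(Notifier, name, make_dispatcher(func))

n = Notifier()
n.Register(Base())
n.Starting()  # Fans out to every registered indicator.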
diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py
index a313f050..f86106b9 100644
--- a/tools/testrunner/local/statusfile.py
+++ b/tools/testrunner/local/statusfile.py
@@ -25,6 +25,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import os
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
@@ -40,12 +41,13 @@ NO_VARIANTS = "NO_VARIANTS"
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = "FAIL_OK"
PASS_OR_FAIL = "PASS_OR_FAIL"
+FAIL_SLOPPY = "FAIL_SLOPPY"
ALWAYS = "ALWAYS"
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
- FAST_VARIANTS, NO_VARIANTS, PASS_OR_FAIL, ALWAYS]:
+ FAST_VARIANTS, NO_VARIANTS, PASS_OR_FAIL, FAIL_SLOPPY, ALWAYS]:
KEYWORDS[key] = key
DEFS = {FAIL_OK: [FAIL, OKAY],
@@ -53,9 +55,11 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
- "arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
- "nacl_x64", "macos", "windows", "linux"]:
+for var in ["debug", "release", "big", "little",
+ "android_arm", "android_arm64", "android_ia32", "android_x87",
+ "android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
+ "mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
+ "macos", "windows", "linux", "aix"]:
VARIABLES[var] = var
@@ -104,7 +108,7 @@ def _AddOutcome(result, new):
def _ParseOutcomeList(rule, outcomes, target_dict, variables):
result = set([])
if type(outcomes) == str:
- outcomes = [outcomes]
+ outcomes = [outcomes]
for item in outcomes:
if type(item) == str:
_AddOutcome(result, item)
@@ -122,10 +126,14 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
target_dict[rule] = result
-def ReadStatusFile(path, variables):
+def ReadContent(path):
with open(path) as f:
global KEYWORDS
- contents = eval(f.read(), KEYWORDS)
+ return eval(f.read(), KEYWORDS)
+
+
+def ReadStatusFile(path, variables):
+ contents = ReadContent(path)
rules = {}
wildcards = {}
@@ -143,3 +151,30 @@ def ReadStatusFile(path, variables):
else:
_ParseOutcomeList(rule, section[rule], rules, variables)
return rules, wildcards
+
+
+def PresubmitCheck(path):
+ contents = ReadContent(path)
+ root_prefix = os.path.basename(os.path.dirname(path)) + "/"
+ status = {"success": True}
+ def _assert(check, message): # Like "assert", but doesn't throw.
+ if not check:
+ print("%s: Error: %s" % (path, message))
+ status["success"] = False
+ try:
+ for section in contents:
+ _assert(type(section) == list, "Section must be a list")
+ _assert(len(section) == 2, "Section list must have exactly 2 entries")
+ section = section[1]
+ _assert(type(section) == dict,
+ "Second entry of section must be a dictionary")
+ for rule in section:
+ _assert(type(rule) == str, "Rule key must be a string")
+ _assert(not rule.startswith(root_prefix),
+ "Suite name prefix must not be used in rule keys")
+ _assert(not rule.endswith('.js'),
+ ".js extension must not be used in rule keys.")
+ return status["success"]
+ except Exception as e:
+ print e
+ return False
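PresubmitCheck's _assert above trades exceptions for accumulation, so one run reports every violation in a status file and returns a single verdict. The dict cell is what makes that work in Python 2, where an inner function cannot rebind an outer local (there is no nonlocal). A stripped-down, self-contained sketch of the same pattern:

def check_sections(sections):
  status = {"success": True}  # Dict cell: writable from the closure.

  def _assert(check, message):  # Like "assert", but doesn't throw.
    if not check:
      print("Error: %s" % message)
      status["success"] = False

  for section in sections:
    _assert(isinstance(section, list), "Section must be a list")
    _assert(len(section) == 2, "Section list must have exactly 2 entries")
  return status["success"]

print(check_sections([["ALWAYS", {}], ["too", "many", "entries"]]))  # False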
diff --git a/tools/testrunner/local/testsuite.py b/tools/testrunner/local/testsuite.py
index 84f07fee..e3d1e232 100644
--- a/tools/testrunner/local/testsuite.py
+++ b/tools/testrunner/local/testsuite.py
@@ -35,27 +35,67 @@ from . import utils
from ..objects import testcase
# Use this to run several variants of the tests.
-VARIANT_FLAGS = {
- "default": [],
- "stress": ["--stress-opt", "--always-opt"],
- "turbofan": ["--turbo-asm", "--turbo-filter=*", "--always-opt"],
- "nocrankshaft": ["--nocrankshaft"]}
+ALL_VARIANT_FLAGS = {
+ "default": [[]],
+ "stress": [["--stress-opt", "--always-opt"]],
+ "turbofan": [["--turbo"]],
+ "turbofan_opt": [["--turbo", "--always-opt"]],
+ "nocrankshaft": [["--nocrankshaft"]],
+ "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
+ "--ignition-fallback-on-eval-and-catch"]],
+ "preparser": [["--min-preparse-length=0"]],
+}
+
+# FAST_VARIANTS implies no --always-opt.
+FAST_VARIANT_FLAGS = {
+ "default": [[]],
+ "stress": [["--stress-opt"]],
+ "turbofan": [["--turbo"]],
+ "nocrankshaft": [["--nocrankshaft"]],
+ "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
+ "--ignition-fallback-on-eval-and-catch"]],
+ "preparser": [["--min-preparse-length=0"]],
+}
+
+ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
+ "nocrankshaft", "ignition", "preparser"])
+FAST_VARIANTS = set(["default", "turbofan"])
+STANDARD_VARIANT = set(["default"])
+
+
+class VariantGenerator(object):
+ def __init__(self, suite, variants):
+ self.suite = suite
+ self.all_variants = ALL_VARIANTS & variants
+ self.fast_variants = FAST_VARIANTS & variants
+ self.standard_variant = STANDARD_VARIANT & variants
+
+ def FilterVariantsByTest(self, testcase):
+ if testcase.outcomes and statusfile.OnlyStandardVariant(
+ testcase.outcomes):
+ return self.standard_variant
+ if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ return self.fast_variants
+ return self.all_variants
+
+ def GetFlagSets(self, testcase, variant):
+ if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ return FAST_VARIANT_FLAGS[variant]
+ else:
+ return ALL_VARIANT_FLAGS[variant]
-FAST_VARIANT_FLAGS = [
- f for v, f in VARIANT_FLAGS.iteritems() if v in ["default", "turbofan"]
-]
class TestSuite(object):
@staticmethod
- def LoadTestSuite(root):
+ def LoadTestSuite(root, global_init=True):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module("testcfg", f, pathname, description)
return module.GetSuite(name, root)
- except:
+ except ImportError:
# Use default if no testcfg is present.
return GoogleTestSuite(name, root)
finally:
@@ -63,6 +103,8 @@ class TestSuite(object):
f.close()
def __init__(self, name, root):
+ # Note: This might be called concurrently from different processes.
+    # Changing hard-disk state should be done in 'SetupWorkingDirectory' below.
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
@@ -70,6 +112,11 @@ class TestSuite(object):
self.wildcards = None # dictionary mapping test paths to list of outcomes
self.total_duration = None # float, assigned on demand
+ def SetupWorkingDirectory(self):
+ # This is called once per test suite object in a multi-process setting.
+ # Multi-process-unsafe work-directory setup can go here.
+ pass
+
def shell(self):
return "d8"
@@ -89,12 +136,19 @@ class TestSuite(object):
def ListTests(self, context):
raise NotImplementedError
- def VariantFlags(self, testcase, default_flags):
- if testcase.outcomes and statusfile.OnlyStandardVariant(testcase.outcomes):
- return [[]]
- if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
- return filter(lambda flags: flags in FAST_VARIANT_FLAGS, default_flags)
- return default_flags
+ def _VariantGeneratorFactory(self):
+ """The variant generator class to be used."""
+ return VariantGenerator
+
+ def CreateVariantGenerator(self, variants):
+ """Return a generator for the testing variants of this suite.
+
+ Args:
+ variants: List of variant names to be run as specified by the test
+ runner.
+ Returns: An object of type VariantGenerator.
+ """
+ return self._VariantGeneratorFactory()(self, set(variants))
def DownloadData(self):
pass
@@ -147,7 +201,7 @@ class TestSuite(object):
assert rule[-1] == '*'
if testname.startswith(rule[:-1]):
used_rules.add(rule)
- t.outcomes = self.wildcards[rule]
+ t.outcomes |= self.wildcards[rule]
if statusfile.DoSkip(t.outcomes):
skip = True
break # "for rule in self.wildcards"
@@ -172,23 +226,36 @@ class TestSuite(object):
print("Unused rule: %s -> %s" % (rule, self.wildcards[rule]))
def FilterTestCasesByArgs(self, args):
+ """Filter test cases based on command-line arguments.
+
+    An argument with an asterisk at the end will match all test cases
+    that have the argument as a prefix. Without an asterisk, only exact
+    matches are used, with the exception of the test-suite name as argument.
+ """
filtered = []
- filtered_args = []
+ globs = []
+ exact_matches = []
for a in args:
- argpath = a.split(os.path.sep)
+ argpath = a.split('/')
if argpath[0] != self.name:
continue
if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
return # Don't filter, run all tests in this suite.
- path = os.path.sep.join(argpath[1:])
+ path = '/'.join(argpath[1:])
if path[-1] == '*':
path = path[:-1]
- filtered_args.append(path)
+ globs.append(path)
+ else:
+ exact_matches.append(path)
for t in self.tests:
- for a in filtered_args:
+ for a in globs:
if t.path.startswith(a):
filtered.append(t)
break
+ for a in exact_matches:
+ if t.path == a:
+ filtered.append(t)
+ break
self.tests = filtered
def GetFlagsForTestCase(self, testcase, context):
@@ -236,6 +303,11 @@ class TestSuite(object):
return self.total_duration
+class StandardVariantGenerator(VariantGenerator):
+ def FilterVariantsByTest(self, testcase):
+ return self.standard_variant
+
+
class GoogleTestSuite(TestSuite):
def __init__(self, name, root):
super(GoogleTestSuite, self).__init__(name, root)
@@ -269,5 +341,8 @@ class GoogleTestSuite(TestSuite):
["--gtest_print_time=0"] +
context.mode_flags)
+ def _VariantGeneratorFactory(self):
+ return StandardVariantGenerator
+
def shell(self):
return self.name
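The variant refactoring reduces "which variants may this test run" to set intersections: the variants requested by the runner are narrowed against ALL_VARIANTS, FAST_VARIANTS, or STANDARD_VARIANT depending on the test's outcomes, and GetFlagSets then picks the matching flag table. A self-contained sketch of the narrowing step; the requested set is illustrative:

ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
                    "nocrankshaft", "ignition", "preparser"])
FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])

requested = set(["default", "stress", "turbofan", "ignition"])

print(sorted(ALL_VARIANTS & requested))      # ordinary tests: all four
print(sorted(FAST_VARIANTS & requested))     # slow tests: no --always-opt
print(sorted(STANDARD_VARIANT & requested))  # standard-only tests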
diff --git a/tools/testrunner/local/utils.py b/tools/testrunner/local/utils.py
index 7bc21b1f..cb6c350e 100644
--- a/tools/testrunner/local/utils.py
+++ b/tools/testrunner/local/utils.py
@@ -32,6 +32,7 @@ from os.path import isdir
from os.path import join
import platform
import re
+import subprocess
import urllib2
@@ -73,6 +74,8 @@ def GuessOS():
return 'solaris'
elif system == 'NetBSD':
return 'netbsd'
+ elif system == 'AIX':
+ return 'aix'
else:
return None
@@ -99,6 +102,8 @@ def DefaultArch():
return 'ia32'
elif machine == 'amd64':
return 'ia32'
+ elif machine == 'ppc64':
+ return 'ppc'
else:
return None
@@ -117,5 +122,15 @@ def IsWindows():
def URLRetrieve(source, destination):
"""urllib is broken for SSL connections via a proxy therefore we
can't use urllib.urlretrieve()."""
+ if IsWindows():
+ try:
+      # In Python 2.7.6 on Windows, urlopen has a problem with redirects.
+ # Try using curl instead. Note, this is fixed in 2.7.8.
+ subprocess.check_call(["curl", source, '-k', '-L', '-o', destination])
+ return
+ except:
+ # If there's no curl, fall back to urlopen.
+ print "Curl is currently not installed. Falling back to python."
+ pass
with open(destination, 'w') as f:
f.write(urllib2.urlopen(source).read())
diff --git a/tools/testrunner/network/endpoint.py b/tools/testrunner/network/endpoint.py
index d0950cf5..516578ac 100644
--- a/tools/testrunner/network/endpoint.py
+++ b/tools/testrunner/network/endpoint.py
@@ -93,6 +93,7 @@ def Execute(workspace, ctx, tests, sock, server):
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
suites_dict = {}
diff --git a/tools/testrunner/network/network_execution.py b/tools/testrunner/network/network_execution.py
index a43a6cfd..c842aba5 100644
--- a/tools/testrunner/network/network_execution.py
+++ b/tools/testrunner/network/network_execution.py
@@ -52,7 +52,6 @@ def GetPeers():
class NetworkedRunner(execution.Runner):
def __init__(self, suites, progress_indicator, context, peers, workspace):
self.suites = suites
- num_tests = 0
datapath = os.path.join("out", "testrunner_data")
# TODO(machenbach): These fields should exist now in the superclass.
# But there is no super constructor call. Check if this is a problem.
@@ -61,8 +60,7 @@ class NetworkedRunner(execution.Runner):
for s in suites:
for t in s.tests:
t.duration = self.perfdata.FetchPerfData(t) or 1.0
- num_tests += len(s.tests)
- self._CommonInit(num_tests, progress_indicator, context)
+ self._CommonInit(suites, progress_indicator, context)
self.tests = [] # Only used if we need to fall back to local execution.
self.tests_lock = threading.Lock()
self.peers = peers
diff --git a/tools/testrunner/objects/context.py b/tools/testrunner/objects/context.py
index 937d9089..c9853d07 100644
--- a/tools/testrunner/objects/context.py
+++ b/tools/testrunner/objects/context.py
@@ -30,7 +30,7 @@ class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
no_sorting, rerun_failures_count, rerun_failures_max,
- predictable):
+ predictable, no_harness, use_perf_data):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -46,16 +46,20 @@ class Context():
self.rerun_failures_count = rerun_failures_count
self.rerun_failures_max = rerun_failures_max
self.predictable = predictable
+ self.no_harness = no_harness
+ self.use_perf_data = use_perf_data
def Pack(self):
return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
self.command_prefix, self.extra_flags, self.noi18n,
self.random_seed, self.no_sorting, self.rerun_failures_count,
- self.rerun_failures_max, self.predictable]
+ self.rerun_failures_max, self.predictable, self.no_harness,
+ self.use_perf_data]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
return Context(packed[0], packed[1], None, packed[2], False,
packed[3], packed[4], packed[5], packed[6], packed[7],
- packed[8], packed[9], packed[10], packed[11], packed[12])
+ packed[8], packed[9], packed[10], packed[11], packed[12],
+ packed[13], packed[14])
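Pack/Unpack above serialize the context positionally, so field order is the protocol: shell_dir and verbose are deliberately dropped, and any new field (here no_harness and use_perf_data) must be appended in Pack, in Unpack's index list, and in the constructor, or values silently shift. A round-trip sketch; the import path and the concrete values are assumptions:

from testrunner.objects.context import Context  # import path assumed

ctx = Context("x64", "release", "out/x64.release", [], False, 60,
              False, [], [], False, None, False, 0, 0, False,
              False, True)  # ..., predictable, no_harness, use_perf_data

packed = ctx.Pack()                # 15 values; shell_dir/verbose dropped
restored = Context.Unpack(packed)  # shell_dir=None, verbose=False
assert restored.use_perf_data and not restored.no_harness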
diff --git a/tools/testrunner/objects/testcase.py b/tools/testrunner/objects/testcase.py
index 6c550827..fa2265c0 100644
--- a/tools/testrunner/objects/testcase.py
+++ b/tools/testrunner/objects/testcase.py
@@ -29,19 +29,22 @@
from . import output
class TestCase(object):
- def __init__(self, suite, path, flags=None, dependency=None):
+ def __init__(self, suite, path, variant='default', flags=None,
+ dependency=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
+ self.variant = variant # name of the used testing variant
self.dependency = dependency # |path| for testcase that must be run first
- self.outcomes = None
+ self.outcomes = set([])
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
- def CopyAddingFlags(self, flags):
- copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
+ def CopyAddingFlags(self, variant, flags):
+ copy = TestCase(self.suite, self.path, variant, self.flags + flags,
+ self.dependency)
copy.outcomes = self.outcomes
return copy
@@ -51,16 +54,16 @@ class TestCase(object):
and returns them as a JSON serializable object.
"""
assert self.id is not None
- return [self.suitename(), self.path, self.flags,
+ return [self.suitename(), self.path, self.variant, self.flags,
self.dependency, list(self.outcomes or []), self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
- test = TestCase(str(task[0]), task[1], task[2], task[3])
- test.outcomes = set(task[4])
- test.id = task[5]
+ test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
+ test.outcomes = set(task[5])
+ test.id = task[6]
test.run = 1
return test
@@ -83,3 +86,11 @@ class TestCase(object):
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
+
+ def __getstate__(self):
+ """Representation to pickle test cases.
+
+ The original suite won't be sent beyond process boundaries. Instead
+ send the name only and retrieve a process-local suite later.
+ """
+ return dict(self.__dict__, suite=self.suite.name)
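__getstate__ above is the sending half of a hand-off: the suite attribute is pickled as its bare name, and the receiving worker swaps the name back for a live suite object (execution.py does this via SetSuiteObject, whose definition is outside the excerpt shown here). A hedged round-trip sketch; FakeSuite and the import path are assumptions:

import pickle
from testrunner.objects.testcase import TestCase  # import path assumed

class FakeSuite(object):
  def __init__(self, name):
    self.name = name

suites = {"mjsunit": FakeSuite("mjsunit")}
test = TestCase(suites["mjsunit"], "div-mod")

payload = pickle.dumps(test)     # __getstate__ stores suite="mjsunit"
remote = pickle.loads(payload)   # remote.suite is now just the string
remote.SetSuiteObject(suites)    # worker re-attaches the live suite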
diff --git a/tools/testrunner/testrunner.isolate b/tools/testrunner/testrunner.isolate
new file mode 100644
index 00000000..669614b2
--- /dev/null
+++ b/tools/testrunner/testrunner.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ '../run-tests.py',
+ ],
+ 'files': [
+ '../run-tests.py',
+ './'
+ ],
+ },
+} \ No newline at end of file
diff --git a/tools/tickprocessor-driver.js b/tools/tickprocessor-driver.js
index 8ba3326f..dc8a87d9 100644
--- a/tools/tickprocessor-driver.js
+++ b/tools/tickprocessor-driver.js
@@ -75,6 +75,9 @@ var tickProcessor = new TickProcessor(
snapshotLogProcessor,
params.distortion,
params.range,
- sourceMap);
+ sourceMap,
+ params.timedRange,
+ params.pairwiseTimedRange,
+ params.onlySummary);
tickProcessor.processLogFile(params.logFileName);
tickProcessor.printStatistics();
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index d5447179..600d2eeb 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -154,7 +154,10 @@ function TickProcessor(
snapshotLogProcessor,
distortion,
range,
- sourceMap) {
+ sourceMap,
+ timedRange,
+ pairwiseTimedRange,
+ onlySummary) {
LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
@@ -187,10 +190,13 @@ function TickProcessor(
'function-move': null,
'function-delete': null,
'heap-sample-item': null,
+ 'current-time': null, // Handled specially, not parsed.
// Obsolete row types.
'code-allocate': null,
'begin-code-region': null,
- 'end-code-region': null });
+ 'end-code-region': null },
+ timedRange,
+ pairwiseTimedRange);
this.cppEntriesProvider_ = cppEntriesProvider;
this.callGraphSize_ = callGraphSize;
@@ -242,6 +248,7 @@ function TickProcessor(
this.generation_ = 1;
this.currentProducerProfile_ = null;
+ this.onlySummary_ = onlySummary;
};
inherits(TickProcessor, LogReader);
@@ -292,7 +299,7 @@ TickProcessor.prototype.isCppCode = function(name) {
TickProcessor.prototype.isJsCode = function(name) {
- return !(name in this.codeTypes_);
+ return name !== "UNKNOWN" && !(name in this.codeTypes_);
};
@@ -451,29 +458,30 @@ TickProcessor.prototype.printStatistics = function() {
if (this.ignoreUnknown_) {
totalTicks -= this.ticks_.unaccounted;
}
+ var printAllTicks = !this.onlySummary_;
// Count library ticks
var flatViewNodes = flatView.head.children;
var self = this;
var libraryTicks = 0;
- this.printHeader('Shared libraries');
+  if (printAllTicks) this.printHeader('Shared libraries');
this.printEntries(flatViewNodes, totalTicks, null,
function(name) { return self.isSharedLibrary(name); },
- function(rec) { libraryTicks += rec.selfTime; });
+ function(rec) { libraryTicks += rec.selfTime; }, printAllTicks);
var nonLibraryTicks = totalTicks - libraryTicks;
var jsTicks = 0;
- this.printHeader('JavaScript');
+  if (printAllTicks) this.printHeader('JavaScript');
this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
function(name) { return self.isJsCode(name); },
- function(rec) { jsTicks += rec.selfTime; });
+ function(rec) { jsTicks += rec.selfTime; }, printAllTicks);
var cppTicks = 0;
- this.printHeader('C++');
+  if (printAllTicks) this.printHeader('C++');
this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
function(name) { return self.isCppCode(name); },
- function(rec) { cppTicks += rec.selfTime; });
+ function(rec) { cppTicks += rec.selfTime; }, printAllTicks);
this.printHeader('Summary');
this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks);
@@ -485,25 +493,27 @@ TickProcessor.prototype.printStatistics = function() {
this.ticks_.total, null);
}
- print('\n [C++ entry points]:');
- print(' ticks cpp total name');
- var c_entry_functions = this.profile_.getCEntryProfile();
- var total_c_entry = c_entry_functions[0].ticks;
- for (var i = 1; i < c_entry_functions.length; i++) {
- c = c_entry_functions[i];
- this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
- }
+  if (printAllTicks) {
+ print('\n [C++ entry points]:');
+ print(' ticks cpp total name');
+ var c_entry_functions = this.profile_.getCEntryProfile();
+ var total_c_entry = c_entry_functions[0].ticks;
+ for (var i = 1; i < c_entry_functions.length; i++) {
+ c = c_entry_functions[i];
+ this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
+ }
- this.printHeavyProfHeader();
- var heavyProfile = this.profile_.getBottomUpProfile();
- var heavyView = this.viewBuilder_.buildView(heavyProfile);
- // To show the same percentages as in the flat profile.
- heavyView.head.totalTime = totalTicks;
- // Sort by total time, desc, then by name, desc.
- heavyView.sort(function(rec1, rec2) {
- return rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- this.printHeavyProfile(heavyView.head.children);
+ this.printHeavyProfHeader();
+ var heavyProfile = this.profile_.getBottomUpProfile();
+ var heavyView = this.viewBuilder_.buildView(heavyProfile);
+ // To show the same percentages as in the flat profile.
+ heavyView.head.totalTime = totalTicks;
+ // Sort by total time, desc, then by name, desc.
+ heavyView.sort(function(rec1, rec2) {
+ return rec2.totalTime - rec1.totalTime ||
+ (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+ this.printHeavyProfile(heavyView.head.children);
+ }
};
@@ -595,13 +605,15 @@ TickProcessor.prototype.formatFunctionName = function(funcName) {
};
TickProcessor.prototype.printEntries = function(
- profile, totalTicks, nonLibTicks, filterP, callback) {
+ profile, totalTicks, nonLibTicks, filterP, callback, printAllTicks) {
var that = this;
this.processProfile(profile, filterP, function (rec) {
if (rec.selfTime == 0) return;
callback(rec);
var funcName = that.formatFunctionName(rec.internalFuncName);
- that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
+    if (printAllTicks) {
+ that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
+ }
});
};
@@ -875,13 +887,20 @@ function ArgumentsProcessor(args) {
'--distortion': ['distortion', 0,
'Specify the logging overhead in picoseconds'],
'--source-map': ['sourceMap', null,
- 'Specify the source map that should be used for output']
+ 'Specify the source map that should be used for output'],
+ '--timed-range': ['timedRange', true,
+ 'Ignore ticks before first and after last Date.now() call'],
+ '--pairwise-timed-range': ['pairwiseTimedRange', true,
+ 'Ignore ticks outside pairs of Date.now() calls'],
+ '--only-summary': ['onlySummary', true,
+ 'Print only tick summary, exclude other information']
};
this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
this.argsDispatch_['--compiler'] = this.argsDispatch_['-c'];
this.argsDispatch_['--other'] = this.argsDispatch_['-o'];
this.argsDispatch_['--external'] = this.argsDispatch_['-e'];
+ this.argsDispatch_['--ptr'] = this.argsDispatch_['--pairwise-timed-range'];
};
@@ -896,17 +915,20 @@ ArgumentsProcessor.DEFAULTS = {
targetRootFS: '',
nm: 'nm',
range: 'auto,auto',
- distortion: 0
+ distortion: 0,
+ timedRange: false,
+ pairwiseTimedRange: false,
+ onlySummary: false
};
ArgumentsProcessor.prototype.parse = function() {
while (this.args_.length) {
- var arg = this.args_[0];
+ var arg = this.args_.shift();
if (arg.charAt(0) != '-') {
- break;
+ this.result_.logFileName = arg;
+ continue;
}
- this.args_.shift();
var userValue = null;
var eqPos = arg.indexOf('=');
if (eqPos != -1) {
@@ -920,10 +942,6 @@ ArgumentsProcessor.prototype.parse = function() {
return false;
}
}
-
- if (this.args_.length >= 1) {
- this.result_.logFileName = this.args_.shift();
- }
return true;
};
@@ -948,15 +966,15 @@ ArgumentsProcessor.prototype.printUsageAndExit = function() {
ArgumentsProcessor.DEFAULTS.logFileName + '".\n');
print('Options:');
for (var arg in this.argsDispatch_) {
- var synonims = [arg];
+ var synonyms = [arg];
var dispatch = this.argsDispatch_[arg];
for (var synArg in this.argsDispatch_) {
if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
- synonims.push(synArg);
+ synonyms.push(synArg);
delete this.argsDispatch_[synArg];
}
}
- print(' ' + padRight(synonims.join(', '), 20) + dispatch[2]);
+ print(' ' + padRight(synonyms.join(', '), 20) + " " + dispatch[2]);
}
quit(2);
};
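Editor's note on the hunks above: isJsCode() now refuses to classify "UNKNOWN" ticks as JavaScript, and the rewritten parse() loop shifts every token, so the log file name may appear before, between, or after flags. A rough Python analogue of that parse pattern (hypothetical, for illustration only; the real logic is the JavaScript above):

    def parse(args, defaults):
        result = dict(defaults)
        while args:
            arg = args.pop(0)                # shift, like this.args_.shift()
            if not arg.startswith('-'):
                result['logFileName'] = arg  # non-dash token: the log file
                continue
            name, _, value = arg.partition('=')  # '--flag=value' support
            result[name.lstrip('-')] = value if value else True
        return result

    opts = parse(['v8.log', '--only-summary'], {'logFileName': 'v8.log'})
    print opts['logFileName'], opts['only-summary']   # v8.log True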
diff --git a/tools/try_perf.py b/tools/try_perf.py
index fcd1ddcb..2403f7d7 100755
--- a/tools/try_perf.py
+++ b/tools/try_perf.py
@@ -3,42 +3,94 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import find_depot_tools
+import argparse
+import os
+import subprocess
import sys
-find_depot_tools.add_depot_tools_to_path()
+BOTS = {
+ '--arm32': 'v8_arm32_perf_try',
+ '--linux32': 'v8_linux32_perf_try',
+ '--linux64': 'v8_linux64_perf_try',
+ '--linux64_atom': 'v8_linux64_atom_perf_try',
+ '--linux64_haswell': 'v8_linux64_haswell_perf_try',
+ '--nexus5': 'v8_nexus5_perf_try',
+ '--nexus7': 'v8_nexus7_perf_try',
+ '--nexus9': 'v8_nexus9_perf_try',
+ '--nexus10': 'v8_nexus10_perf_try',
+}
-from git_cl import Changelist
-
-BOTS = [
+DEFAULT_BOTS = [
+ 'v8_arm32_perf_try',
'v8_linux32_perf_try',
- 'v8_linux64_perf_try',
+ 'v8_linux64_haswell_perf_try',
+ 'v8_nexus10_perf_try',
]
-def main(tests):
- cl = Changelist()
- if not cl.GetIssue():
- print 'Need to upload first'
- return 1
+PUBLIC_BENCHMARKS = [
+ 'arewefastyet',
+ 'embenchen',
+ 'emscripten',
+ 'compile',
+ 'jetstream',
+ 'jsbench',
+ 'jstests',
+ 'kraken_orig',
+ 'massive',
+ 'memory',
+ 'octane',
+ 'octane-pr',
+ 'octane-tf',
+ 'octane-tf-pr',
+ 'simdjs',
+ 'sunspider',
+]
- props = cl.GetIssueProperties()
- if props.get('closed'):
- print 'Cannot send tryjobs for a closed CL'
- return 1
+V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
- if props.get('private'):
- print 'Cannot use trybots with private issue'
- return 1
+def main():
+ parser = argparse.ArgumentParser(description='')
+ parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
+ parser.add_argument('--extra-flags', default='',
+ help='Extra flags to be passed to the executable.')
+ for option in sorted(BOTS):
+ parser.add_argument(
+ option, dest='bots', action='append_const', const=BOTS[option],
+ help='Add %s trybot.' % BOTS[option])
+ options = parser.parse_args()
+ if not options.bots:
+ print 'No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS)
+ options.bots = DEFAULT_BOTS
- if not tests:
+ if not options.benchmarks:
print 'Please specify the benchmarks to run as arguments.'
return 1
- masters = {'internal.client.v8': dict((b, tests) for b in BOTS)}
- cl.RpcServer().trigger_distributed_try_jobs(
- cl.GetIssue(), cl.GetMostRecentPatchset(), cl.GetBranch(),
- False, None, masters)
- return 0
+ for benchmark in options.benchmarks:
+ if benchmark not in PUBLIC_BENCHMARKS:
+ print ('%s not found in our benchmark list. The respective trybot might '
+ 'fail, unless you run something this script isn\'t aware of. '
+ 'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
+      print 'Proceed anyway? [Y/n] ',
+ answer = sys.stdin.readline().strip()
+ if answer != "" and answer != "Y" and answer != "y":
+ return 1
+
+ assert '"' not in options.extra_flags and '\'' not in options.extra_flags, (
+ 'Invalid flag specification.')
+
+ # Ensure depot_tools are updated.
+ subprocess.check_output(
+ 'gclient', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
+
+ cmd = ['git cl try -m internal.client.v8']
+ cmd += ['-b %s' % bot for bot in options.bots]
+ benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
+ cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
+ if options.extra_flags:
+ cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
+ subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
+
-if __name__ == "__main__": # pragma: no cover
- sys.exit(main(sys.argv[1:]))
+if __name__ == '__main__': # pragma: no cover
+ sys.exit(main())
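Editor's note: for a concrete picture of what the rewritten script hands to git cl try, here is the command assembly from above replayed with sample values (bot and benchmark names are examples only):

    bots = ['v8_linux32_perf_try', 'v8_linux64_haswell_perf_try']
    benchmarks = ['octane', 'sunspider']
    extra_flags = '--turbo'

    cmd = ['git cl try -m internal.client.v8']
    cmd += ['-b %s' % bot for bot in bots]
    cmd += ['-p \'testfilter=[%s]\'' % ','.join('"%s"' % b for b in benchmarks)]
    if extra_flags:
        cmd += ['-p \'extra_flags="%s"\'' % extra_flags]
    print ' '.join(cmd)
    # git cl try -m internal.client.v8 -b v8_linux32_perf_try
    #   -b v8_linux64_haswell_perf_try -p 'testfilter=["octane","sunspider"]'
    #   -p 'extra_flags="--turbo"'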
diff --git a/tools/unittests/run_perf_test.py b/tools/unittests/run_perf_test.py
index f9ea0c09..1a4d7385 100644
--- a/tools/unittests/run_perf_test.py
+++ b/tools/unittests/run_perf_test.py
@@ -10,7 +10,9 @@ from mock import DEFAULT
from mock import MagicMock
import os
from os import path, sys
+import platform
import shutil
+import subprocess
import tempfile
import unittest
@@ -129,6 +131,9 @@ class PerfTest(unittest.TestCase):
self.assertEquals(dirs.pop(), args[0])
os.chdir = MagicMock(side_effect=chdir)
+ subprocess.check_call = MagicMock()
+ platform.system = MagicMock(return_value='Linux')
+
def _CallMain(self, *args):
self._test_output = path.join(TEST_WORKSPACE, "results.json")
all_args=[
@@ -139,17 +144,17 @@ class PerfTest(unittest.TestCase):
all_args += args
return run_perf.Main(all_args)
- def _LoadResults(self):
- with open(self._test_output) as f:
+ def _LoadResults(self, file_name=None):
+ with open(file_name or self._test_output) as f:
return json.load(f)
- def _VerifyResults(self, suite, units, traces):
+ def _VerifyResults(self, suite, units, traces, file_name=None):
self.assertEquals([
{"units": units,
"graphs": [suite, trace["name"]],
"results": trace["results"],
"stddev": trace["stddev"]} for trace in traces],
- self._LoadResults()["traces"])
+ self._LoadResults(file_name)["traces"])
def _VerifyErrors(self, errors):
self.assertEquals(errors, self._LoadResults()["errors"])
@@ -402,17 +407,69 @@ class PerfTest(unittest.TestCase):
# require lots of complicated mocks for the android tools.
def testAndroid(self):
self._WriteTestInput(V8_JSON)
- platform = run_perf.Platform
+ # FIXME(machenbach): This is not test-local!
+ platform = run_perf.AndroidPlatform
platform.PreExecution = MagicMock(return_value=None)
platform.PostExecution = MagicMock(return_value=None)
platform.PreTests = MagicMock(return_value=None)
platform.Run = MagicMock(
- return_value="Richards: 1.234\nDeltaBlue: 10657567\n")
+ return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
run_perf.AndroidPlatform = MagicMock(return_value=platform)
self.assertEquals(
0, self._CallMain("--android-build-tools", "/some/dir",
- "--arch", "android_arm"))
+ "--arch", "arm"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
+
+ def testTwoRuns_Trybot(self):
+ test_input = dict(V8_JSON)
+ test_input["run_count"] = 2
+ self._WriteTestInput(test_input)
+ self._MockCommand([".", ".", ".", "."],
+ ["Richards: 100\nDeltaBlue: 200\n",
+ "Richards: 200\nDeltaBlue: 20\n",
+ "Richards: 50\nDeltaBlue: 200\n",
+ "Richards: 100\nDeltaBlue: 20\n"])
+ test_output_no_patch = path.join(TEST_WORKSPACE, "results_no_patch.json")
+ self.assertEquals(0, self._CallMain(
+ "--outdir-no-patch", "out-no-patch",
+ "--json-test-results-no-patch", test_output_no_patch,
+ ))
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": ["100.0", "200.0"], "stddev": ""},
+ {"name": "DeltaBlue", "results": ["20.0", "20.0"], "stddev": ""},
+ ])
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
+ {"name": "DeltaBlue", "results": ["200.0", "200.0"], "stddev": ""},
+ ], test_output_no_patch)
+ self._VerifyErrors([])
+ self._VerifyMockMultiple(
+ (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
+ (path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
+ (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
+ (path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
+ )
+
+ def testWrongBinaryWithProf(self):
+ test_input = dict(V8_JSON)
+ self._WriteTestInput(test_input)
+ self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
+ self.assertEquals(0, self._CallMain("--extra-flags=--prof"))
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": ["1.234"], "stddev": ""},
+ {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(path.join("out", "x64.release", "d7"),
+ "--flag", "--prof", "run.js")
+
+ def testUnzip(self):
+ def Gen():
+ for i in [1, 2, 3]:
+ yield i, i + 1
+ l, r = run_perf.Unzip(Gen())
+ self.assertEquals([1, 2, 3], list(l()))
+ self.assertEquals([2, 3, 4], list(r()))
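Editor's note: testUnzip pins down the contract of run_perf.Unzip, which takes an iterable of pairs and returns two zero-argument callables yielding the left and right elements. One plausible implementation consistent with the test (the real one lives in tools/run_perf.py and may differ):

    def Unzip(iterable):
        left, right = [], []
        for l, r in iterable:       # materialize the pairs once
            left.append(l)
            right.append(r)
        # Hand back callables so each side can be iterated independently.
        return lambda: iter(left), lambda: iter(right)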
diff --git a/tools/v8-info.sh b/tools/v8-info.sh
index 1f25d147..838d92a0 100755
--- a/tools/v8-info.sh
+++ b/tools/v8-info.sh
@@ -30,11 +30,11 @@
########## Global variable definitions
BASE_URL="https://code.google.com/p/v8/source/list"
-VERSION="src/version.cc"
-MAJOR="MAJOR_VERSION"
-MINOR="MINOR_VERSION"
-BUILD="BUILD_NUMBER"
-PATCH="PATCH_LEVEL"
+VERSION="include/v8-version.h"
+MAJOR="V8_MAJOR_VERSION"
+MINOR="V8_MINOR_VERSION"
+BUILD="V8_BUILD_NUMBER"
+PATCH="V8_PATCH_LEVEL"
V8="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py
index 5e3e841f..0461bcbb 100644
--- a/tools/v8heapconst.py
+++ b/tools/v8heapconst.py
@@ -44,8 +44,6 @@ INSTANCE_TYPES = {
90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
0: "INTERNALIZED_STRING_TYPE",
4: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
- 1: "CONS_INTERNALIZED_STRING_TYPE",
- 5: "CONS_ONE_BYTE_INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
6: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
@@ -53,229 +51,247 @@ INSTANCE_TYPES = {
22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
- 129: "MAP_TYPE",
- 130: "CODE_TYPE",
+ 130: "SIMD128_VALUE_TYPE",
+ 132: "MAP_TYPE",
+ 133: "CODE_TYPE",
131: "ODDBALL_TYPE",
- 132: "CELL_TYPE",
- 133: "PROPERTY_CELL_TYPE",
- 134: "HEAP_NUMBER_TYPE",
+ 173: "CELL_TYPE",
+ 176: "PROPERTY_CELL_TYPE",
+ 129: "HEAP_NUMBER_TYPE",
+ 134: "MUTABLE_HEAP_NUMBER_TYPE",
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
- 137: "FREE_SPACE_TYPE",
- 138: "EXTERNAL_INT8_ARRAY_TYPE",
- 139: "EXTERNAL_UINT8_ARRAY_TYPE",
- 140: "EXTERNAL_INT16_ARRAY_TYPE",
- 141: "EXTERNAL_UINT16_ARRAY_TYPE",
- 142: "EXTERNAL_INT32_ARRAY_TYPE",
- 143: "EXTERNAL_UINT32_ARRAY_TYPE",
- 144: "EXTERNAL_FLOAT32_ARRAY_TYPE",
- 145: "EXTERNAL_FLOAT64_ARRAY_TYPE",
- 146: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
- 147: "FIXED_INT8_ARRAY_TYPE",
- 148: "FIXED_UINT8_ARRAY_TYPE",
- 149: "FIXED_INT16_ARRAY_TYPE",
- 150: "FIXED_UINT16_ARRAY_TYPE",
- 151: "FIXED_INT32_ARRAY_TYPE",
- 152: "FIXED_UINT32_ARRAY_TYPE",
- 153: "FIXED_FLOAT32_ARRAY_TYPE",
- 154: "FIXED_FLOAT64_ARRAY_TYPE",
- 155: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 157: "FILLER_TYPE",
- 158: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 159: "DECLARED_ACCESSOR_INFO_TYPE",
- 160: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 161: "ACCESSOR_PAIR_TYPE",
- 162: "ACCESS_CHECK_INFO_TYPE",
- 163: "INTERCEPTOR_INFO_TYPE",
- 164: "CALL_HANDLER_INFO_TYPE",
- 165: "FUNCTION_TEMPLATE_INFO_TYPE",
- 166: "OBJECT_TEMPLATE_INFO_TYPE",
- 167: "SIGNATURE_INFO_TYPE",
- 168: "TYPE_SWITCH_INFO_TYPE",
- 170: "ALLOCATION_MEMENTO_TYPE",
- 169: "ALLOCATION_SITE_TYPE",
- 171: "SCRIPT_TYPE",
- 172: "CODE_CACHE_TYPE",
- 173: "POLYMORPHIC_CODE_CACHE_TYPE",
- 174: "TYPE_FEEDBACK_INFO_TYPE",
- 175: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 176: "BOX_TYPE",
- 179: "FIXED_ARRAY_TYPE",
- 156: "FIXED_DOUBLE_ARRAY_TYPE",
- 180: "CONSTANT_POOL_ARRAY_TYPE",
- 181: "SHARED_FUNCTION_INFO_TYPE",
- 182: "JS_MESSAGE_OBJECT_TYPE",
- 185: "JS_VALUE_TYPE",
- 186: "JS_DATE_TYPE",
- 187: "JS_OBJECT_TYPE",
- 188: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 189: "JS_GENERATOR_OBJECT_TYPE",
- 190: "JS_MODULE_TYPE",
- 191: "JS_GLOBAL_OBJECT_TYPE",
- 192: "JS_BUILTINS_OBJECT_TYPE",
- 193: "JS_GLOBAL_PROXY_TYPE",
- 194: "JS_ARRAY_TYPE",
- 195: "JS_ARRAY_BUFFER_TYPE",
- 196: "JS_TYPED_ARRAY_TYPE",
- 197: "JS_DATA_VIEW_TYPE",
- 184: "JS_PROXY_TYPE",
- 198: "JS_SET_TYPE",
- 199: "JS_MAP_TYPE",
- 200: "JS_WEAK_MAP_TYPE",
- 201: "JS_WEAK_SET_TYPE",
- 202: "JS_REGEXP_TYPE",
+ 137: "BYTECODE_ARRAY_TYPE",
+ 138: "FREE_SPACE_TYPE",
+ 139: "FIXED_INT8_ARRAY_TYPE",
+ 140: "FIXED_UINT8_ARRAY_TYPE",
+ 141: "FIXED_INT16_ARRAY_TYPE",
+ 142: "FIXED_UINT16_ARRAY_TYPE",
+ 143: "FIXED_INT32_ARRAY_TYPE",
+ 144: "FIXED_UINT32_ARRAY_TYPE",
+ 145: "FIXED_FLOAT32_ARRAY_TYPE",
+ 146: "FIXED_FLOAT64_ARRAY_TYPE",
+ 147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 149: "FILLER_TYPE",
+ 150: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 151: "DECLARED_ACCESSOR_INFO_TYPE",
+ 152: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 153: "ACCESSOR_PAIR_TYPE",
+ 154: "ACCESS_CHECK_INFO_TYPE",
+ 155: "INTERCEPTOR_INFO_TYPE",
+ 156: "CALL_HANDLER_INFO_TYPE",
+ 157: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 158: "OBJECT_TEMPLATE_INFO_TYPE",
+ 159: "SIGNATURE_INFO_TYPE",
+ 160: "TYPE_SWITCH_INFO_TYPE",
+ 162: "ALLOCATION_MEMENTO_TYPE",
+ 161: "ALLOCATION_SITE_TYPE",
+ 163: "SCRIPT_TYPE",
+ 164: "CODE_CACHE_TYPE",
+ 165: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 166: "TYPE_FEEDBACK_INFO_TYPE",
+ 167: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 168: "BOX_TYPE",
+ 177: "PROTOTYPE_INFO_TYPE",
+ 178: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE",
+ 171: "FIXED_ARRAY_TYPE",
+ 148: "FIXED_DOUBLE_ARRAY_TYPE",
+ 172: "SHARED_FUNCTION_INFO_TYPE",
+ 174: "WEAK_CELL_TYPE",
+ 175: "TRANSITION_ARRAY_TYPE",
+ 181: "JS_MESSAGE_OBJECT_TYPE",
+ 180: "JS_VALUE_TYPE",
+ 182: "JS_DATE_TYPE",
+ 183: "JS_OBJECT_TYPE",
+ 184: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 185: "JS_GENERATOR_OBJECT_TYPE",
+ 186: "JS_MODULE_TYPE",
+ 187: "JS_GLOBAL_OBJECT_TYPE",
+ 188: "JS_GLOBAL_PROXY_TYPE",
+ 189: "JS_ARRAY_TYPE",
+ 190: "JS_ARRAY_BUFFER_TYPE",
+ 191: "JS_TYPED_ARRAY_TYPE",
+ 192: "JS_DATA_VIEW_TYPE",
+ 179: "JS_PROXY_TYPE",
+ 193: "JS_SET_TYPE",
+ 194: "JS_MAP_TYPE",
+ 195: "JS_SET_ITERATOR_TYPE",
+ 196: "JS_MAP_ITERATOR_TYPE",
+ 197: "JS_ITERATOR_RESULT_TYPE",
+ 198: "JS_WEAK_MAP_TYPE",
+ 199: "JS_WEAK_SET_TYPE",
+ 200: "JS_PROMISE_TYPE",
+ 201: "JS_REGEXP_TYPE",
+ 202: "JS_BOUND_FUNCTION_TYPE",
203: "JS_FUNCTION_TYPE",
- 183: "JS_FUNCTION_PROXY_TYPE",
- 177: "DEBUG_INFO_TYPE",
- 178: "BREAK_POINT_INFO_TYPE",
+ 169: "DEBUG_INFO_TYPE",
+ 170: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
0x08081: (136, "ByteArrayMap"),
- 0x080a9: (129, "MetaMap"),
- 0x080d1: (131, "OddballMap"),
- 0x080f9: (4, "OneByteInternalizedStringMap"),
- 0x08121: (179, "FixedArrayMap"),
- 0x08149: (134, "HeapNumberMap"),
- 0x08171: (137, "FreeSpaceMap"),
- 0x08199: (157, "OnePointerFillerMap"),
- 0x081c1: (157, "TwoPointerFillerMap"),
- 0x081e9: (132, "CellMap"),
- 0x08211: (133, "GlobalPropertyCellMap"),
- 0x08239: (181, "SharedFunctionInfoMap"),
- 0x08261: (179, "NativeContextMap"),
- 0x08289: (130, "CodeMap"),
- 0x082b1: (179, "ScopeInfoMap"),
- 0x082d9: (179, "FixedCOWArrayMap"),
- 0x08301: (156, "FixedDoubleArrayMap"),
- 0x08329: (180, "ConstantPoolArrayMap"),
- 0x08351: (179, "HashTableMap"),
- 0x08379: (128, "SymbolMap"),
- 0x083a1: (64, "StringMap"),
- 0x083c9: (68, "OneByteStringMap"),
- 0x083f1: (65, "ConsStringMap"),
- 0x08419: (69, "ConsOneByteStringMap"),
- 0x08441: (67, "SlicedStringMap"),
- 0x08469: (71, "SlicedOneByteStringMap"),
- 0x08491: (66, "ExternalStringMap"),
- 0x084b9: (74, "ExternalStringWithOneByteDataMap"),
- 0x084e1: (70, "ExternalOneByteStringMap"),
- 0x08509: (82, "ShortExternalStringMap"),
- 0x08531: (90, "ShortExternalStringWithOneByteDataMap"),
- 0x08559: (0, "InternalizedStringMap"),
- 0x08581: (1, "ConsInternalizedStringMap"),
- 0x085a9: (5, "ConsOneByteInternalizedStringMap"),
- 0x085d1: (2, "ExternalInternalizedStringMap"),
- 0x085f9: (10, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x08621: (6, "ExternalOneByteInternalizedStringMap"),
- 0x08649: (18, "ShortExternalInternalizedStringMap"),
- 0x08671: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x08699: (22, "ShortExternalOneByteInternalizedStringMap"),
- 0x086c1: (86, "ShortExternalOneByteStringMap"),
- 0x086e9: (64, "UndetectableStringMap"),
- 0x08711: (68, "UndetectableOneByteStringMap"),
- 0x08739: (138, "ExternalInt8ArrayMap"),
- 0x08761: (139, "ExternalUint8ArrayMap"),
- 0x08789: (140, "ExternalInt16ArrayMap"),
- 0x087b1: (141, "ExternalUint16ArrayMap"),
- 0x087d9: (142, "ExternalInt32ArrayMap"),
- 0x08801: (143, "ExternalUint32ArrayMap"),
- 0x08829: (144, "ExternalFloat32ArrayMap"),
- 0x08851: (145, "ExternalFloat64ArrayMap"),
- 0x08879: (146, "ExternalUint8ClampedArrayMap"),
- 0x088a1: (148, "FixedUint8ArrayMap"),
- 0x088c9: (147, "FixedInt8ArrayMap"),
- 0x088f1: (150, "FixedUint16ArrayMap"),
- 0x08919: (149, "FixedInt16ArrayMap"),
- 0x08941: (152, "FixedUint32ArrayMap"),
- 0x08969: (151, "FixedInt32ArrayMap"),
- 0x08991: (153, "FixedFloat32ArrayMap"),
- 0x089b9: (154, "FixedFloat64ArrayMap"),
- 0x089e1: (155, "FixedUint8ClampedArrayMap"),
- 0x08a09: (179, "NonStrictArgumentsElementsMap"),
- 0x08a31: (179, "FunctionContextMap"),
- 0x08a59: (179, "CatchContextMap"),
- 0x08a81: (179, "WithContextMap"),
- 0x08aa9: (179, "BlockContextMap"),
- 0x08ad1: (179, "ModuleContextMap"),
- 0x08af9: (179, "GlobalContextMap"),
- 0x08b21: (182, "JSMessageObjectMap"),
- 0x08b49: (135, "ForeignMap"),
- 0x08b71: (187, "NeanderMap"),
- 0x08b99: (170, "AllocationMementoMap"),
- 0x08bc1: (169, "AllocationSiteMap"),
- 0x08be9: (173, "PolymorphicCodeCacheMap"),
- 0x08c11: (171, "ScriptMap"),
- 0x08c61: (187, "ExternalMap"),
- 0x08cb1: (176, "BoxMap"),
- 0x08cd9: (158, "DeclaredAccessorDescriptorMap"),
- 0x08d01: (159, "DeclaredAccessorInfoMap"),
- 0x08d29: (160, "ExecutableAccessorInfoMap"),
- 0x08d51: (161, "AccessorPairMap"),
- 0x08d79: (162, "AccessCheckInfoMap"),
- 0x08da1: (163, "InterceptorInfoMap"),
- 0x08dc9: (164, "CallHandlerInfoMap"),
- 0x08df1: (165, "FunctionTemplateInfoMap"),
- 0x08e19: (166, "ObjectTemplateInfoMap"),
- 0x08e41: (167, "SignatureInfoMap"),
- 0x08e69: (168, "TypeSwitchInfoMap"),
- 0x08e91: (172, "CodeCacheMap"),
- 0x08eb9: (174, "TypeFeedbackInfoMap"),
- 0x08ee1: (175, "AliasedArgumentsEntryMap"),
- 0x08f09: (177, "DebugInfoMap"),
- 0x08f31: (178, "BreakPointInfoMap"),
+ 0x080ad: (132, "MetaMap"),
+ 0x080d9: (131, "NullMap"),
+ 0x08105: (171, "FixedArrayMap"),
+ 0x08131: (4, "OneByteInternalizedStringMap"),
+ 0x0815d: (138, "FreeSpaceMap"),
+ 0x08189: (149, "OnePointerFillerMap"),
+ 0x081b5: (149, "TwoPointerFillerMap"),
+ 0x081e1: (131, "UndefinedMap"),
+ 0x0820d: (129, "HeapNumberMap"),
+ 0x08239: (131, "TheHoleMap"),
+ 0x08265: (131, "BooleanMap"),
+ 0x08291: (131, "UninitializedMap"),
+ 0x082bd: (173, "CellMap"),
+ 0x082e9: (176, "GlobalPropertyCellMap"),
+ 0x08315: (172, "SharedFunctionInfoMap"),
+ 0x08341: (134, "MutableHeapNumberMap"),
+ 0x0836d: (130, "Float32x4Map"),
+ 0x08399: (130, "Int32x4Map"),
+ 0x083c5: (130, "Uint32x4Map"),
+ 0x083f1: (130, "Bool32x4Map"),
+ 0x0841d: (130, "Int16x8Map"),
+ 0x08449: (130, "Uint16x8Map"),
+ 0x08475: (130, "Bool16x8Map"),
+ 0x084a1: (130, "Int8x16Map"),
+ 0x084cd: (130, "Uint8x16Map"),
+ 0x084f9: (130, "Bool8x16Map"),
+ 0x08525: (171, "NativeContextMap"),
+ 0x08551: (133, "CodeMap"),
+ 0x0857d: (171, "ScopeInfoMap"),
+ 0x085a9: (171, "FixedCOWArrayMap"),
+ 0x085d5: (148, "FixedDoubleArrayMap"),
+ 0x08601: (174, "WeakCellMap"),
+ 0x0862d: (175, "TransitionArrayMap"),
+ 0x08659: (68, "OneByteStringMap"),
+ 0x08685: (171, "FunctionContextMap"),
+ 0x086b1: (131, "NoInterceptorResultSentinelMap"),
+ 0x086dd: (131, "ArgumentsMarkerMap"),
+ 0x08709: (131, "ExceptionMap"),
+ 0x08735: (131, "TerminationExceptionMap"),
+ 0x08761: (171, "HashTableMap"),
+ 0x0878d: (171, "OrderedHashTableMap"),
+ 0x087b9: (128, "SymbolMap"),
+ 0x087e5: (64, "StringMap"),
+ 0x08811: (69, "ConsOneByteStringMap"),
+ 0x0883d: (65, "ConsStringMap"),
+ 0x08869: (67, "SlicedStringMap"),
+ 0x08895: (71, "SlicedOneByteStringMap"),
+ 0x088c1: (66, "ExternalStringMap"),
+ 0x088ed: (74, "ExternalStringWithOneByteDataMap"),
+ 0x08919: (70, "ExternalOneByteStringMap"),
+ 0x08945: (70, "NativeSourceStringMap"),
+ 0x08971: (82, "ShortExternalStringMap"),
+ 0x0899d: (90, "ShortExternalStringWithOneByteDataMap"),
+ 0x089c9: (0, "InternalizedStringMap"),
+ 0x089f5: (2, "ExternalInternalizedStringMap"),
+ 0x08a21: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x08a4d: (6, "ExternalOneByteInternalizedStringMap"),
+ 0x08a79: (18, "ShortExternalInternalizedStringMap"),
+ 0x08aa5: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x08ad1: (22, "ShortExternalOneByteInternalizedStringMap"),
+ 0x08afd: (86, "ShortExternalOneByteStringMap"),
+ 0x08b29: (140, "FixedUint8ArrayMap"),
+ 0x08b55: (139, "FixedInt8ArrayMap"),
+ 0x08b81: (142, "FixedUint16ArrayMap"),
+ 0x08bad: (141, "FixedInt16ArrayMap"),
+ 0x08bd9: (144, "FixedUint32ArrayMap"),
+ 0x08c05: (143, "FixedInt32ArrayMap"),
+ 0x08c31: (145, "FixedFloat32ArrayMap"),
+ 0x08c5d: (146, "FixedFloat64ArrayMap"),
+ 0x08c89: (147, "FixedUint8ClampedArrayMap"),
+ 0x08cb5: (171, "SloppyArgumentsElementsMap"),
+ 0x08ce1: (171, "CatchContextMap"),
+ 0x08d0d: (171, "WithContextMap"),
+ 0x08d39: (171, "BlockContextMap"),
+ 0x08d65: (171, "ModuleContextMap"),
+ 0x08d91: (171, "ScriptContextMap"),
+ 0x08dbd: (171, "ScriptContextTableMap"),
+ 0x08de9: (181, "JSMessageObjectMap"),
+ 0x08e15: (135, "ForeignMap"),
+ 0x08e41: (183, "NeanderMap"),
+ 0x08e6d: (183, "ExternalMap"),
+ 0x08e99: (162, "AllocationMementoMap"),
+ 0x08ec5: (161, "AllocationSiteMap"),
+ 0x08ef1: (165, "PolymorphicCodeCacheMap"),
+ 0x08f1d: (163, "ScriptMap"),
+ 0x08f75: (137, "BytecodeArrayMap"),
+ 0x08fa1: (168, "BoxMap"),
+ 0x08fcd: (152, "ExecutableAccessorInfoMap"),
+ 0x08ff9: (153, "AccessorPairMap"),
+ 0x09025: (154, "AccessCheckInfoMap"),
+ 0x09051: (155, "InterceptorInfoMap"),
+ 0x0907d: (156, "CallHandlerInfoMap"),
+ 0x090a9: (157, "FunctionTemplateInfoMap"),
+ 0x090d5: (158, "ObjectTemplateInfoMap"),
+ 0x09101: (164, "CodeCacheMap"),
+ 0x0912d: (166, "TypeFeedbackInfoMap"),
+ 0x09159: (167, "AliasedArgumentsEntryMap"),
+ 0x09185: (169, "DebugInfoMap"),
+ 0x091b1: (170, "BreakPointInfoMap"),
+ 0x091dd: (177, "PrototypeInfoMap"),
+ 0x09209: (178, "SloppyBlockWithEvalContextExtensionMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("OLD_POINTER_SPACE", 0x08081): "NullValue",
- ("OLD_POINTER_SPACE", 0x08091): "UndefinedValue",
- ("OLD_POINTER_SPACE", 0x080a1): "TheHoleValue",
- ("OLD_POINTER_SPACE", 0x080b1): "TrueValue",
- ("OLD_POINTER_SPACE", 0x080c1): "FalseValue",
- ("OLD_POINTER_SPACE", 0x080d1): "UninitializedValue",
- ("OLD_POINTER_SPACE", 0x080e1): "NoInterceptorResultSentinel",
- ("OLD_POINTER_SPACE", 0x080f1): "ArgumentsMarker",
- ("OLD_POINTER_SPACE", 0x08101): "NumberStringCache",
- ("OLD_POINTER_SPACE", 0x08909): "SingleCharacterStringCache",
- ("OLD_POINTER_SPACE", 0x08d11): "StringSplitCache",
- ("OLD_POINTER_SPACE", 0x09119): "RegExpMultipleCache",
- ("OLD_POINTER_SPACE", 0x09521): "TerminationException",
- ("OLD_POINTER_SPACE", 0x09531): "MessageListeners",
- ("OLD_POINTER_SPACE", 0x0954d): "CodeStubs",
- ("OLD_POINTER_SPACE", 0x0ca65): "MegamorphicSymbol",
- ("OLD_POINTER_SPACE", 0x0ca75): "UninitializedSymbol",
- ("OLD_POINTER_SPACE", 0x10ae9): "NonMonomorphicCache",
- ("OLD_POINTER_SPACE", 0x110fd): "PolymorphicCodeCache",
- ("OLD_POINTER_SPACE", 0x11105): "NativesSourceCache",
- ("OLD_POINTER_SPACE", 0x11155): "EmptyScript",
- ("OLD_POINTER_SPACE", 0x11189): "IntrinsicFunctionNames",
- ("OLD_POINTER_SPACE", 0x141a5): "ObservationState",
- ("OLD_POINTER_SPACE", 0x141b1): "FrozenSymbol",
- ("OLD_POINTER_SPACE", 0x141c1): "NonExistentSymbol",
- ("OLD_POINTER_SPACE", 0x141d1): "ElementsTransitionSymbol",
- ("OLD_POINTER_SPACE", 0x141e1): "EmptySlowElementDictionary",
- ("OLD_POINTER_SPACE", 0x1437d): "ObservedSymbol",
- ("OLD_POINTER_SPACE", 0x1438d): "AllocationSitesScratchpad",
- ("OLD_POINTER_SPACE", 0x14795): "MicrotaskState",
- ("OLD_POINTER_SPACE", 0x36241): "StringTable",
- ("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
- ("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
- ("OLD_DATA_SPACE", 0x080a9): "NanValue",
- ("OLD_DATA_SPACE", 0x08141): "EmptyByteArray",
- ("OLD_DATA_SPACE", 0x08149): "EmptyConstantPoolArray",
- ("OLD_DATA_SPACE", 0x0828d): "EmptyExternalInt8Array",
- ("OLD_DATA_SPACE", 0x08299): "EmptyExternalUint8Array",
- ("OLD_DATA_SPACE", 0x082a5): "EmptyExternalInt16Array",
- ("OLD_DATA_SPACE", 0x082b1): "EmptyExternalUint16Array",
- ("OLD_DATA_SPACE", 0x082bd): "EmptyExternalInt32Array",
- ("OLD_DATA_SPACE", 0x082c9): "EmptyExternalUint32Array",
- ("OLD_DATA_SPACE", 0x082d5): "EmptyExternalFloat32Array",
- ("OLD_DATA_SPACE", 0x082e1): "EmptyExternalFloat64Array",
- ("OLD_DATA_SPACE", 0x082ed): "EmptyExternalUint8ClampedArray",
- ("OLD_DATA_SPACE", 0x082f9): "InfinityValue",
- ("OLD_DATA_SPACE", 0x08305): "MinusZeroValue",
- ("CODE_SPACE", 0x138e1): "JsConstructEntryCode",
- ("CODE_SPACE", 0x21361): "JsEntryCode",
+ ("OLD_SPACE", 0x08081): "NullValue",
+ ("OLD_SPACE", 0x08095): "EmptyDescriptorArray",
+ ("OLD_SPACE", 0x0809d): "EmptyFixedArray",
+ ("OLD_SPACE", 0x080c9): "UndefinedValue",
+ ("OLD_SPACE", 0x080f5): "NanValue",
+ ("OLD_SPACE", 0x08105): "TheHoleValue",
+ ("OLD_SPACE", 0x08129): "TrueValue",
+ ("OLD_SPACE", 0x08161): "FalseValue",
+ ("OLD_SPACE", 0x08189): "empty_string",
+ ("OLD_SPACE", 0x08195): "hidden_string",
+ ("OLD_SPACE", 0x081a1): "UninitializedValue",
+ ("OLD_SPACE", 0x081d1): "EmptyByteArray",
+ ("OLD_SPACE", 0x081d9): "NoInterceptorResultSentinel",
+ ("OLD_SPACE", 0x08219): "ArgumentsMarker",
+ ("OLD_SPACE", 0x08249): "Exception",
+ ("OLD_SPACE", 0x08275): "TerminationException",
+ ("OLD_SPACE", 0x082ad): "NumberStringCache",
+ ("OLD_SPACE", 0x08ab5): "SingleCharacterStringCache",
+ ("OLD_SPACE", 0x08f4d): "StringSplitCache",
+ ("OLD_SPACE", 0x09355): "RegExpMultipleCache",
+ ("OLD_SPACE", 0x0975d): "EmptyFixedUint8Array",
+ ("OLD_SPACE", 0x0976d): "EmptyFixedInt8Array",
+ ("OLD_SPACE", 0x0977d): "EmptyFixedUint16Array",
+ ("OLD_SPACE", 0x0978d): "EmptyFixedInt16Array",
+ ("OLD_SPACE", 0x0979d): "EmptyFixedUint32Array",
+ ("OLD_SPACE", 0x097ad): "EmptyFixedInt32Array",
+ ("OLD_SPACE", 0x097bd): "EmptyFixedFloat32Array",
+ ("OLD_SPACE", 0x097cd): "EmptyFixedFloat64Array",
+ ("OLD_SPACE", 0x097dd): "EmptyFixedUint8ClampedArray",
+ ("OLD_SPACE", 0x097ed): "InfinityValue",
+ ("OLD_SPACE", 0x097fd): "MinusZeroValue",
+ ("OLD_SPACE", 0x0980d): "MinusInfinityValue",
+ ("OLD_SPACE", 0x0981d): "MessageListeners",
+ ("OLD_SPACE", 0x09839): "CodeStubs",
+ ("OLD_SPACE", 0x10201): "DummyVector",
+ ("OLD_SPACE", 0x1403d): "NonMonomorphicCache",
+ ("OLD_SPACE", 0x14651): "PolymorphicCodeCache",
+ ("OLD_SPACE", 0x14659): "NativesSourceCache",
+ ("OLD_SPACE", 0x148f5): "ExperimentalNativesSourceCache",
+ ("OLD_SPACE", 0x14929): "ExtraNativesSourceCache",
+ ("OLD_SPACE", 0x14949): "ExperimentalExtraNativesSourceCache",
+ ("OLD_SPACE", 0x14955): "EmptyScript",
+ ("OLD_SPACE", 0x14995): "IntrinsicFunctionNames",
+ ("OLD_SPACE", 0x2e73d): "UndefinedCell",
+ ("OLD_SPACE", 0x2e745): "ObservationState",
+ ("OLD_SPACE", 0x2e751): "ScriptList",
+ ("OLD_SPACE", 0x2e8d9): "ClearedOptimizedCodeMap",
+ ("OLD_SPACE", 0x2e8e5): "EmptyWeakCell",
+ ("OLD_SPACE", 0x54715): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x54761): "WeakObjectToCodeTable",
+ ("OLD_SPACE", 0x54875): "ArrayProtector",
+ ("OLD_SPACE", 0x54885): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x54895): "NoScriptSharedFunctionInfos",
+ ("OLD_SPACE", 0x5711d): "InterpreterTable",
+ ("OLD_SPACE", 0x57325): "EmptyBytecodeArray",
+ ("OLD_SPACE", 0x5a2d1): "StringTable",
+ ("CODE_SPACE", 0x1a2a1): "JsEntryCode",
+ ("CODE_SPACE", 0x1f081): "JsConstructEntryCode",
}
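Editor's note: these generated tables are consumed by heap-inspection tooling, for example a minidump debugger such as tools/grokdump.py, to map raw instance types and map addresses back to names. A quick lookup against the values above (assumed usage; the addresses are only meaningful for the matching build):

    import v8heapconst
    print v8heapconst.INSTANCE_TYPES[128]    # SYMBOL_TYPE
    print v8heapconst.KNOWN_MAPS[0x08081]    # (136, 'ByteArrayMap')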
diff --git a/tools/verify_source_deps.py b/tools/verify_source_deps.py
new file mode 100755
index 00000000..50caace7
--- /dev/null
+++ b/tools/verify_source_deps.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Script to print potentially missing source dependencies based on the actual
+.h and .cc files in the source tree and which files are included in the gyp
+and gn files. The latter inclusion is overapproximated.
+
+TODO(machenbach): Gyp files in src will point to source files in src without a
+src/ prefix. For simplicity, all paths relative to src are stripped. But this
+tool won't be accurate for other sources in other directories (e.g. cctest).
+"""
+
+import itertools
+import re
+import os
+
+
+V8_BASE = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+V8_SRC_BASE = os.path.join(V8_BASE, 'src')
+V8_INCLUDE_BASE = os.path.join(V8_BASE, 'include')
+
+GYP_FILES = [
+ os.path.join(V8_BASE, 'src', 'd8.gyp'),
+ os.path.join(V8_BASE, 'src', 'third_party', 'vtune', 'v8vtune.gyp'),
+ os.path.join(V8_BASE, 'test', 'cctest', 'cctest.gyp'),
+ os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'),
+ os.path.join(V8_BASE, 'tools', 'gyp', 'v8.gyp'),
+ os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'),
+]
+
+
+def path_no_prefix(path):
+ if path.startswith('../'):
+ return path_no_prefix(path[3:])
+ elif path.startswith('src/'):
+ return path_no_prefix(path[4:])
+ else:
+ return path
+
+
+def isources(directory):
+ for root, dirs, files in os.walk(directory):
+ for f in files:
+ if not (f.endswith('.h') or f.endswith('.cc')):
+ continue
+ yield path_no_prefix(os.path.relpath(os.path.join(root, f), V8_BASE))
+
+
+def iflatten(obj):
+ if isinstance(obj, dict):
+ for value in obj.values():
+ for i in iflatten(value):
+ yield i
+ elif isinstance(obj, list):
+ for value in obj:
+ for i in iflatten(value):
+ yield i
+ elif isinstance(obj, basestring):
+ yield path_no_prefix(obj)
+
+
+def iflatten_gyp_file(gyp_file):
+  """Overapproximates all values in the gyp file.
+
+ Iterates over all string values recursively. Removes '../' path prefixes.
+ """
+ with open(gyp_file) as f:
+ return iflatten(eval(f.read()))
+
+
+def iflatten_gn_file(gn_file):
+  """Overapproximates all values in the gn file.
+
+ Iterates over all double quoted strings.
+ """
+ with open(gn_file) as f:
+ for line in f.read().splitlines():
+ match = re.match(r'.*"([^"]*)".*', line)
+ if match:
+ yield path_no_prefix(match.group(1))
+
+
+def icheck_values(values, *source_dirs):
+ for source_file in itertools.chain(
+ *[isources(source_dir) for source_dir in source_dirs]
+ ):
+ if source_file not in values:
+ yield source_file
+
+
+gyp_values = set(itertools.chain(
+ *[iflatten_gyp_file(gyp_file) for gyp_file in GYP_FILES]
+ ))
+
+print "----------- Files not in gyp: ------------"
+for i in sorted(icheck_values(gyp_values, V8_SRC_BASE, V8_INCLUDE_BASE)):
+ print i
+
+gn_values = set(iflatten_gn_file(os.path.join(V8_BASE, 'BUILD.gn')))
+
+print "\n----------- Files not in gn: -------------"
+for i in sorted(icheck_values(gn_values, V8_SRC_BASE, V8_INCLUDE_BASE)):
+ print i
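Editor's note: as a quick sanity check of path_no_prefix above, the recursive prefix stripping behaves like this (inputs made up for illustration):

    print path_no_prefix('../../src/heap/heap.cc')   # heap/heap.cc
    print path_no_prefix('include/v8.h')             # include/v8.h (unchanged)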
diff --git a/tools/vim/ninja-build.vim b/tools/vim/ninja-build.vim
new file mode 100644
index 00000000..3e9b8948
--- /dev/null
+++ b/tools/vim/ninja-build.vim
@@ -0,0 +1,119 @@
+" Copyright (c) 2015 the V8 project authors. All rights reserved.
+" Use of this source code is governed by a BSD-style license that can be
+" found in the LICENSE file.
+"
+" Adds a "Compile this file" function, using ninja. On Mac, binds Cmd-k to
+" this command. On Windows, Ctrl-F7 (which is the same as the VS default).
+" On Linux, <Leader>o, which is \o by default ("o"=creates .o files)
+"
+" Adds a "Build this target" function, using ninja. This is not bound
+" to any key by default, but can be used via the :CrBuild command.
+" It builds 'd8' by default, but :CrBuild target1 target2 etc. works as well,
+" i.e. :CrBuild all or :CrBuild d8 cctest unittests.
+"
+" Requires that gyp has already generated build.ninja files, and that ninja is
+" in your path (which it is automatically if depot_tools is in your path).
+" Bumps the number of parallel jobs in ninja automatically if goma is
+" detected.
+"
+" Add the following to your .vimrc file:
+" so /path/to/src/tools/vim/ninja-build.vim
+
+python << endpython
+import os
+import sys
+import vim
+
+
+def path_to_current_buffer():
+ """Returns the absolute path of the current buffer."""
+ return vim.current.buffer.name
+
+
+def path_to_source_root():
+ """Returns the absolute path to the V8 source root."""
+ candidate = os.path.dirname(path_to_current_buffer())
+  # A list of files and directories used to identify the source root. The
+  # shorter it is, the more likely it's wrong; the longer it is, the more
+  # likely it is to break when we rename directories.
+ fingerprints = ['.git', 'build', 'include', 'samples', 'src', 'testing',
+ 'third_party', 'tools']
+ while candidate and not all(
+ [os.path.isdir(os.path.join(candidate, fp)) for fp in fingerprints]):
+ candidate = os.path.dirname(candidate)
+ return candidate
+
+
+def path_to_build_dir(configuration):
+ """Returns <v8_root>/<output_dir>/(Release|Debug)."""
+
+ v8_root = path_to_source_root()
+ sys.path.append(os.path.join(v8_root, 'tools', 'ninja'))
+ from ninja_output import GetNinjaOutputDirectory
+ return GetNinjaOutputDirectory(v8_root, configuration)
+
+
+def compute_ninja_command_for_targets(targets='', configuration=None):
+ flags = []
+ if "use_goma=1" in os.getenv('GYP_DEFINES', '').split(' '):
+ flags = ['-j', '512']
+  build_dir = path_to_build_dir(configuration)
+ build_cmd = ' '.join(['ninja'] + flags + ['-C', build_dir, targets])
+ vim.command('return "%s"' % build_cmd)
+
+
+def compute_ninja_command_for_current_buffer(configuration=None):
+ """Returns the shell command to compile the file in the current buffer."""
+ build_dir = path_to_build_dir(configuration)
+
+ # ninja needs filepaths for the ^ syntax to be relative to the
+ # build directory.
+ file_to_build = path_to_current_buffer()
+ file_to_build = os.path.relpath(file_to_build, build_dir) + '^'
+ if sys.platform == 'win32':
+ # Escape \ for Vim, and ^ for both Vim and shell.
+ file_to_build = file_to_build.replace('\\', '\\\\').replace('^', '^^^^')
+ compute_ninja_command_for_targets(file_to_build, configuration)
+endpython
+
+fun! s:MakeWithCustomCommand(build_cmd)
+ let l:oldmakepgr = &makeprg
+ let &makeprg=a:build_cmd
+ silent make | cwindow
+ if !has('gui_running')
+ redraw!
+ endif
+ let &makeprg = l:oldmakepgr
+endfun
+
+fun! s:NinjaCommandForCurrentBuffer()
+ python compute_ninja_command_for_current_buffer()
+endfun
+
+fun! s:NinjaCommandForTargets(targets)
+ python compute_ninja_command_for_targets(vim.eval('a:targets'))
+endfun
+
+fun! CrCompileFile()
+ call s:MakeWithCustomCommand(s:NinjaCommandForCurrentBuffer())
+endfun
+
+fun! CrBuild(...)
+ let l:targets = a:0 > 0 ? join(a:000, ' ') : ''
+ if (l:targets !~ '\i')
+ let l:targets = 'd8'
+ endif
+ call s:MakeWithCustomCommand(s:NinjaCommandForTargets(l:targets))
+endfun
+
+command! CrCompileFile call CrCompileFile()
+command! -nargs=* CrBuild call CrBuild(<q-args>)
+
+if has('mac')
+ map <D-k> :CrCompileFile<cr>
+ imap <D-k> <esc>:CrCompileFile<cr>
+elseif has('win32')
+ map <C-F7> :CrCompileFile<cr>
+ imap <C-F7> <esc>:CrCompileFile<cr>
+elseif has('unix')
+ map <Leader>o :CrCompileFile<cr>
+endif
diff --git a/tools/whitespace.txt b/tools/whitespace.txt
index 657e68f4..687be113 100644
--- a/tools/whitespace.txt
+++ b/tools/whitespace.txt
@@ -2,7 +2,7 @@ You can modify this file to create no-op changelists.
Try to write something funny. And please don't add trailing whitespace.
-A Smi walks into a bar and says:
+A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up...........
+The Smi looked at them when a crazy v8-autoroll account showed up.....