author     Jeff Vander Stoep <jeffv@google.com>  2020-10-14 15:35:59 +0200
committer  Jeff Vander Stoep <jeffv@google.com>  2020-10-14 15:44:30 +0200
commit     3adfea8b276f06db63ae4182233560537aca2ffb (patch)
tree       a543e650c0d9b57239020605562e73d35f9db409 /grpc/tools/run_tests
parent     e028ffbabf8620351ce637bb48a8bb539c753c38 (diff)
download   grpcio-sys-3adfea8b276f06db63ae4182233560537aca2ffb.tar.gz
Import grpcio-sys 0.6.0
And add metadata files using the following command:

  get_rust_pkg.py --add3prf -v grpcio-sys-0.6.0 -o grpcio-sys

Use LICENSE file from parent grpcio crate.
Add README.android as requested during security review.

Test: None
Change-Id: Ib5f5c2af6f51d50c536cc4b6979666d480a57041
Diffstat (limited to 'grpc/tools/run_tests')
-rw-r--r--  grpc/tools/run_tests/README.md  53
-rw-r--r--  grpc/tools/run_tests/artifacts/__init__.py  13
-rw-r--r--  grpc/tools/run_tests/artifacts/artifact_targets.py  399
-rw-r--r--  grpc/tools/run_tests/artifacts/build_artifact_csharp.bat  50
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_artifact_csharp.sh  33
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_artifact_csharp_android.sh  24
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_artifact_csharp_ios.sh  23
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_artifact_php.sh  24
-rw-r--r--  grpc/tools/run_tests/artifacts/build_artifact_protoc.bat  36
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_artifact_protoc.sh  27
-rw-r--r--  grpc/tools/run_tests/artifacts/build_artifact_python.bat  61
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_artifact_python.sh  145
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_artifact_ruby.sh  64
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_package_php.sh  23
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_package_python.sh  50
-rwxr-xr-x  grpc/tools/run_tests/artifacts/build_package_ruby.sh  61
-rw-r--r--  grpc/tools/run_tests/artifacts/distribtest_targets.py  352
-rw-r--r--  grpc/tools/run_tests/artifacts/package_targets.py  163
-rwxr-xr-x  grpc/tools/run_tests/artifacts/run_in_workspace.sh  37
-rw-r--r--  grpc/tools/run_tests/build_stats/build_stats_schema.json  56
-rw-r--r--  grpc/tools/run_tests/build_stats/build_stats_schema_no_matrix.json  49
-rwxr-xr-x  grpc/tools/run_tests/dockerize/build_and_run_docker.sh  91
-rwxr-xr-x  grpc/tools/run_tests/dockerize/build_docker_and_run_tests.sh  90
-rwxr-xr-x  grpc/tools/run_tests/dockerize/build_interop_image.sh  127
-rwxr-xr-x  grpc/tools/run_tests/dockerize/docker_run.sh  44
-rwxr-xr-x  grpc/tools/run_tests/dockerize/docker_run_tests.sh  48
-rw-r--r--  grpc/tools/run_tests/helper_scripts/build_csharp.bat  27
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/build_csharp.sh  26
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/build_php.sh  37
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/build_python.sh  231
-rw-r--r--  grpc/tools/run_tests/helper_scripts/build_python_msys2.sh  21
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/build_ruby.sh  28
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/bundle_install_wrapper.sh  31
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/post_tests_c.sh  30
-rw-r--r--  grpc/tools/run_tests/helper_scripts/post_tests_csharp.bat  39
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/post_tests_csharp.sh  29
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/post_tests_php.sh  31
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/post_tests_python.sh  24
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/post_tests_ruby.sh  31
-rw-r--r--  grpc/tools/run_tests/helper_scripts/pre_build_cmake.bat  35
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/pre_build_cmake.sh  24
-rw-r--r--  grpc/tools/run_tests/helper_scripts/pre_build_csharp.bat  43
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/pre_build_csharp.sh  28
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/pre_build_ruby.sh  24
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/prep_xds.sh  37
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/run_grpc-node.sh  33
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/run_python.sh  30
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/run_ruby.sh  21
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh  38
-rwxr-xr-x  grpc/tools/run_tests/helper_scripts/run_tests_in_workspace.sh  34
-rw-r--r--  grpc/tools/run_tests/interop/interop_html_report.template  201
-rwxr-xr-x  grpc/tools/run_tests/interop/with_nvm.sh  20
-rwxr-xr-x  grpc/tools/run_tests/interop/with_rvm.sh  20
-rwxr-xr-x  grpc/tools/run_tests/lb_interop_tests/gen_build_yaml.py  345
-rw-r--r--  grpc/tools/run_tests/performance/README.md  134
-rw-r--r--  grpc/tools/run_tests/performance/__init__.py  13
-rwxr-xr-x  grpc/tools/run_tests/performance/bq_upload_result.py  185
-rwxr-xr-x  grpc/tools/run_tests/performance/build_performance.sh  85
-rwxr-xr-x  grpc/tools/run_tests/performance/build_performance_go.sh  30
-rwxr-xr-x  grpc/tools/run_tests/performance/build_performance_node.sh  29
-rwxr-xr-x  grpc/tools/run_tests/performance/build_performance_php7.sh  30
-rwxr-xr-x  grpc/tools/run_tests/performance/kill_workers.sh  44
-rw-r--r--  grpc/tools/run_tests/performance/massage_qps_stats.py  511
-rw-r--r--  grpc/tools/run_tests/performance/massage_qps_stats_helpers.py  62
-rwxr-xr-x  grpc/tools/run_tests/performance/patch_scenario_results_schema.py  59
-rwxr-xr-x  grpc/tools/run_tests/performance/process_local_perf_flamegraphs.sh  28
-rwxr-xr-x  grpc/tools/run_tests/performance/process_remote_perf_flamegraphs.sh  33
-rwxr-xr-x  grpc/tools/run_tests/performance/remote_host_build.sh  22
-rwxr-xr-x  grpc/tools/run_tests/performance/remote_host_prepare.sh  43
-rwxr-xr-x  grpc/tools/run_tests/performance/run_netperf.sh  30
-rwxr-xr-x  grpc/tools/run_tests/performance/run_qps_driver.sh  25
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_csharp.sh  23
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_go.sh  23
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_java.sh  24
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_node.sh  33
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_php.sh  24
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_python.sh  20
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_python_asyncio.sh  20
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_ruby.sh  22
-rwxr-xr-x  grpc/tools/run_tests/performance/run_worker_rust.sh  19
-rw-r--r--  grpc/tools/run_tests/performance/scenario_config.py  1490
-rw-r--r--  grpc/tools/run_tests/performance/scenario_result_schema.json  1899
-rw-r--r--  grpc/tools/run_tests/python_utils/__init__.py  13
-rwxr-xr-x  grpc/tools/run_tests/python_utils/antagonist.py  18
-rw-r--r--  grpc/tools/run_tests/python_utils/check_on_pr.py  140
-rwxr-xr-x  grpc/tools/run_tests/python_utils/dockerjob.py  156
-rw-r--r--  grpc/tools/run_tests/python_utils/filter_pull_request_tests.py  201
-rwxr-xr-x  grpc/tools/run_tests/python_utils/jobset.py  583
-rwxr-xr-x  grpc/tools/run_tests/python_utils/port_server.py  206
-rw-r--r--  grpc/tools/run_tests/python_utils/report_utils.py  180
-rw-r--r--  grpc/tools/run_tests/python_utils/start_port_server.py  129
-rwxr-xr-x  grpc/tools/run_tests/python_utils/upload_rbe_results.py  293
-rw-r--r--  grpc/tools/run_tests/python_utils/upload_test_results.py  176
-rwxr-xr-x  grpc/tools/run_tests/python_utils/watch_dirs.py  60
-rwxr-xr-x  grpc/tools/run_tests/run_build_statistics.py  250
-rwxr-xr-x  grpc/tools/run_tests/run_grpclb_interop_tests.py  605
-rwxr-xr-x  grpc/tools/run_tests/run_interop_tests.py  1719
-rwxr-xr-x  grpc/tools/run_tests/run_microbenchmark.py  261
-rwxr-xr-x  grpc/tools/run_tests/run_performance_tests.py  719
-rwxr-xr-x  grpc/tools/run_tests/run_tests.py  1981
-rwxr-xr-x  grpc/tools/run_tests/run_tests_matrix.py  577
-rwxr-xr-x  grpc/tools/run_tests/run_xds_tests.py  1278
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_bad_dependencies.sh  31
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_bazel_workspace.py  184
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_buildifier.sh  31
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_cache_mk.sh  24
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_deprecated_grpc++.py  189
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_owners.sh  29
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_port_platform.py  67
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_qps_scenario_changes.py  34
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_shellcheck.sh  29
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_submodules.sh  47
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_test_filtering.py  161
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_tracer_sanity.py  47
-rwxr-xr-x  grpc/tools/run_tests/sanity/check_version.py  85
-rwxr-xr-x  grpc/tools/run_tests/sanity/core_banned_functions.py  75
-rwxr-xr-x  grpc/tools/run_tests/sanity/core_untyped_structs.sh  27
-rwxr-xr-x  grpc/tools/run_tests/sanity/cpp_banned_constructs.sh  49
-rw-r--r--  grpc/tools/run_tests/sanity/sanity_tests.yaml  31
-rwxr-xr-x  grpc/tools/run_tests/start_port_server.py  30
-rwxr-xr-x  grpc/tools/run_tests/task_runner.py  120
121 files changed, 19456 insertions, 0 deletions
diff --git a/grpc/tools/run_tests/README.md b/grpc/tools/run_tests/README.md
new file mode 100644
index 00000000..cab917ef
--- /dev/null
+++ b/grpc/tools/run_tests/README.md
@@ -0,0 +1,53 @@
+# Overview
+
+This directory contains scripts that facilitate building and running tests. We use Python scripts as the entry point for our
+tests because they let us run tests with the same command line regardless of the platform you are using.
+
+# Unit tests (run_tests.py)
+
+Builds gRPC in the given language and runs its unit tests. Use `tools/run_tests/run_tests.py --help` for more help.
+
+###### Example
+`tools/run_tests/run_tests.py -l csharp -c dbg`
+
+###### Useful options (among many others)
+- `--use_docker` Builds a Docker container with all the prerequisites for the given language and runs the tests inside that container.
+- `--build_only` Only build, do not run the tests.
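+
+For example, these options can be combined (an illustrative invocation): `tools/run_tests/run_tests.py -l python -c opt --build_only --use_docker` builds the Python tests inside a Docker container without running them.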
+
+Note: If you get an error such as `ImportError: No module named httplib2`, then you may be missing some Python modules. Install the module listed in the error and try again.
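+
+For instance, `python -m pip install --user httplib2` installs the module from the example above; substitute whichever module name the error reports.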
+
+Note: some tests may be flaky. Check the "Issues" tab for known flakes and other issues.
+
+The full suite of unit tests will take many minutes to run.
+
+# Interop tests (run_interop_tests.py)
+
+Runs tests for cross-platform/cross-language interoperability. For more details, see [Interop tests descriptions](/doc/interop-test-descriptions.md).
+The script can also run interop tests for grpc-java and grpc-go, using sources checked out alongside the grpc repository.
+
+###### Example
+`tools/run_tests/run_interop_tests.py -l csharp -s c++ --use_docker` (run interop tests with C# client and C++ server)
+
+Note: if you see an error like `no space left on device` when running the
+interop tests using Docker, make sure that Docker is building the image files in
+a location with sufficient disk space.
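+
+For example, `docker system df` reports how much disk space images and containers consume, and `docker system prune` reclaims space from unused ones (both available in recent Docker releases).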
+
+# Performance benchmarks (run_performance_tests.py)
+
+Runs predefined benchmark scenarios for the given languages. Besides the simple configuration of running all the scenarios locally,
+the script also supports orchestrating test runs with the client and server running on different machines and uploading the results
+to BigQuery.
+
+###### Example
+`tools/run_tests/run_performance_tests.py -l c++ node`
+
+###### Useful options
+- `--regex` Use a regular expression to select particular scenarios to run; see the example below.
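+
+For example, `tools/run_tests/run_performance_tests.py -l c++ --regex '.*_ping_pong_.*'` would run only scenarios whose names contain `_ping_pong_` (an illustrative pattern; the scenario names are defined in `tools/run_tests/performance/scenario_config.py`).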
+
+# Artifacts & Packages (task_runner.py)
+
+A generalized framework for running predefined tasks based on their labels. We use this to build binary artifacts & distribution packages and to test them.
+
+###### Example
+`tools/run_tests/task_runner.py -f python artifact linux x64` (build tasks with labels `python`, `artifact`, `linux`, and `x64`)
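+
+A task is selected only when it carries every label passed to `-f`, so, for example, `tools/run_tests/task_runner.py -f ruby package linux` would run only tasks labeled `ruby`, `package`, and `linux` (an illustrative filter; the available labels come from the target definitions under `tools/run_tests/artifacts/`).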
+
diff --git a/grpc/tools/run_tests/artifacts/__init__.py b/grpc/tools/run_tests/artifacts/__init__.py
new file mode 100644
index 00000000..5772620b
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/grpc/tools/run_tests/artifacts/artifact_targets.py b/grpc/tools/run_tests/artifacts/artifact_targets.py
new file mode 100644
index 00000000..f748a9a9
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/artifact_targets.py
@@ -0,0 +1,399 @@
+#!/usr/bin/env python
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Definition of targets to build artifacts."""
+
+import os.path
+import random
+import string
+import sys
+
+sys.path.insert(0, os.path.abspath('..'))
+import python_utils.jobset as jobset
+
+
+def create_docker_jobspec(name,
+ dockerfile_dir,
+ shell_command,
+ environ={},
+ flake_retries=0,
+ timeout_retries=0,
+ timeout_seconds=30 * 60,
+ docker_base_image=None,
+ extra_docker_args=None,
+ verbose_success=False):
+ """Creates jobspec for a task running under docker."""
+ environ = environ.copy()
+ environ['RUN_COMMAND'] = shell_command
+ environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
+
+ docker_args = []
+ for k, v in environ.items():
+ docker_args += ['-e', '%s=%s' % (k, v)]
+ docker_env = {
+ 'DOCKERFILE_DIR': dockerfile_dir,
+ 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
+ 'OUTPUT_DIR': 'artifacts'
+ }
+
+ if docker_base_image is not None:
+ docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
+ if extra_docker_args is not None:
+ docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
+ jobspec = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+ docker_args,
+ environ=docker_env,
+ shortname='build_artifact.%s' % (name),
+ timeout_seconds=timeout_seconds,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries,
+ verbose_success=verbose_success)
+ return jobspec
+
+
+def create_jobspec(name,
+ cmdline,
+ environ={},
+ shell=False,
+ flake_retries=0,
+ timeout_retries=0,
+ timeout_seconds=30 * 60,
+ use_workspace=False,
+ cpu_cost=1.0,
+ verbose_success=False):
+ """Creates jobspec."""
+ environ = environ.copy()
+ if use_workspace:
+ environ['WORKSPACE_NAME'] = 'workspace_%s' % name
+ environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
+ cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
+ ] + cmdline
+ else:
+ environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
+
+ jobspec = jobset.JobSpec(cmdline=cmdline,
+ environ=environ,
+ shortname='build_artifact.%s' % (name),
+ timeout_seconds=timeout_seconds,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries,
+ shell=shell,
+ cpu_cost=cpu_cost,
+ verbose_success=verbose_success)
+ return jobspec
+
+
+_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
+
+_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
+
+
+class PythonArtifact:
+ """Builds Python artifacts."""
+
+ def __init__(self, platform, arch, py_version):
+ self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'python', platform, arch, py_version]
+ self.py_version = py_version
+ if 'manylinux' in platform:
+ self.labels.append('linux')
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ environ = {}
+ if self.platform == 'linux_extra':
+ # Raspberry Pi build
+ environ['PYTHON'] = '/usr/local/bin/python{}'.format(
+ self.py_version)
+ environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
+ # https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
+ # A QEMU bug causes submodule update to hang, so we copy directly
+ environ['RELATIVE_COPY_PATH'] = '.'
+            # Parallel builds are counterproductive in an emulated environment
+ environ['GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS'] = '1'
+ extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
+ 'tools/run_tests/artifacts/build_artifact_python.sh',
+ environ=environ,
+ timeout_seconds=60 * 60 * 5,
+ docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
+ extra_docker_args=extra_args)
+ elif 'manylinux' in self.platform:
+ if self.arch == 'x86':
+ environ['SETARCH_CMD'] = 'linux32'
+ # Inside the manylinux container, the python installations are located in
+ # special places...
+ environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
+ self.py_version)
+ environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
+ # Platform autodetection for the manylinux1 image breaks so we set the
+ # defines ourselves.
+ # TODO(atash) get better platform-detection support in core so we don't
+ # need to do this manually...
+ environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
+ environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
+ environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
+
+ return create_docker_jobspec(
+ self.name,
+ # NOTE(rbellevi): Do *not* update this without also ensuring the
+ # base_docker_image attribute is accurate.
+ 'tools/dockerfile/grpc_artifact_python_%s_%s' %
+ (self.platform, self.arch),
+ 'tools/run_tests/artifacts/build_artifact_python.sh',
+ environ=environ,
+ timeout_seconds=60 * 60,
+ docker_base_image='quay.io/pypa/manylinux1_i686'
+ if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
+ elif self.platform == 'windows':
+ if 'Python27' in self.py_version:
+ environ['EXT_COMPILER'] = 'mingw32'
+ else:
+ environ['EXT_COMPILER'] = 'msvc'
+ # For some reason, the batch script %random% always runs with the same
+ # seed. We create a random temp-dir here
+ dir = ''.join(
+ random.choice(string.ascii_uppercase) for _ in range(10))
+ return create_jobspec(self.name, [
+ 'tools\\run_tests\\artifacts\\build_artifact_python.bat',
+ self.py_version, '32' if self.arch == 'x86' else '64'
+ ],
+ environ=environ,
+ timeout_seconds=45 * 60,
+ use_workspace=True)
+ else:
+ environ['PYTHON'] = self.py_version
+ environ['SKIP_PIP_INSTALL'] = 'TRUE'
+ return create_jobspec(
+ self.name,
+ ['tools/run_tests/artifacts/build_artifact_python.sh'],
+ environ=environ,
+ timeout_seconds=60 * 60 * 2,
+ use_workspace=True)
+
+ def __str__(self):
+ return self.name
+
+
+class RubyArtifact:
+ """Builds ruby native gem."""
+
+ def __init__(self, platform, arch):
+ self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'ruby', platform, arch]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ # Ruby build uses docker internally and docker cannot be nested.
+ # We are using a custom workspace instead.
+ return create_jobspec(
+ self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
+ use_workspace=True,
+ timeout_seconds=45 * 60)
+
+
+class CSharpExtArtifact:
+ """Builds C# native extension library"""
+
+ def __init__(self, platform, arch, arch_abi=None):
+ self.name = 'csharp_ext_%s_%s' % (platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.arch_abi = arch_abi
+ self.labels = ['artifact', 'csharp', platform, arch]
+ if arch_abi:
+ self.name += '_%s' % arch_abi
+ self.labels.append(arch_abi)
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.arch == 'android':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_android_ndk',
+ 'tools/run_tests/artifacts/build_artifact_csharp_android.sh',
+ environ={'ANDROID_ABI': self.arch_abi})
+ elif self.arch == 'ios':
+ return create_jobspec(
+ self.name,
+ ['tools/run_tests/artifacts/build_artifact_csharp_ios.sh'],
+ use_workspace=True)
+ elif self.platform == 'windows':
+ return create_jobspec(self.name, [
+ 'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
+ self.arch
+ ],
+ use_workspace=True)
+ else:
+ if self.platform == 'linux':
+ cmake_arch_option = '' # x64 is the default architecture
+ if self.arch == 'x86':
+ # TODO(jtattermusch): more work needed to enable
+ # boringssl assembly optimizations for 32-bit linux.
+ # Problem: currently we are building the artifact under
+ # 32-bit docker image, but CMAKE_SYSTEM_PROCESSOR is still
+ # set to x86_64, so the resulting boringssl binary
+ # would have undefined symbols.
+ cmake_arch_option = '-DOPENSSL_NO_ASM=ON'
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_centos6_%s' % self.arch,
+ 'tools/run_tests/artifacts/build_artifact_csharp.sh',
+ environ={'CMAKE_ARCH_OPTION': cmake_arch_option})
+ else:
+ cmake_arch_option = '' # x64 is the default architecture
+ if self.arch == 'x86':
+ cmake_arch_option = '-DCMAKE_OSX_ARCHITECTURES=i386'
+ return create_jobspec(
+ self.name,
+ ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
+ environ={'CMAKE_ARCH_OPTION': cmake_arch_option},
+ use_workspace=True)
+
+ def __str__(self):
+ return self.name
+
+
+class PHPArtifact:
+ """Builds PHP PECL package"""
+
+ def __init__(self, platform, arch):
+ self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'php', platform, arch]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_centos6_{}'.format(self.arch),
+ 'tools/run_tests/artifacts/build_artifact_php.sh')
+
+
+class ProtocArtifact:
+ """Builds protoc and protoc-plugin artifacts"""
+
+ def __init__(self, platform, arch):
+ self.name = 'protoc_%s_%s' % (platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.labels = ['artifact', 'protoc', platform, arch]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform != 'windows':
+ cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
+ ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
+ if self.platform != 'macos':
+ ldflags += ' -static-libgcc -static-libstdc++ -s'
+ environ = {
+ 'CONFIG': 'opt',
+ 'CXXFLAGS': cxxflags,
+ 'LDFLAGS': ldflags,
+ 'PROTOBUF_LDFLAGS_EXTRA': ldflags
+ }
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/grpc_artifact_protoc',
+ 'tools/run_tests/artifacts/build_artifact_protoc.sh',
+ environ=environ)
+ else:
+ environ[
+ 'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
+ return create_jobspec(
+ self.name,
+ ['tools/run_tests/artifacts/build_artifact_protoc.sh'],
+ environ=environ,
+ timeout_seconds=60 * 60,
+ use_workspace=True)
+ else:
+ generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
+ return create_jobspec(
+ self.name,
+ ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
+ environ={'generator': generator},
+ use_workspace=True)
+
+ def __str__(self):
+ return self.name
+
+
+def targets():
+ """Gets list of supported targets"""
+ return ([
+ Cls(platform, arch) for Cls in (CSharpExtArtifact, ProtocArtifact)
+ for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
+ ] + [
+ CSharpExtArtifact('linux', 'android', arch_abi='arm64-v8a'),
+ CSharpExtArtifact('linux', 'android', arch_abi='armeabi-v7a'),
+ CSharpExtArtifact('linux', 'android', arch_abi='x86'),
+ CSharpExtArtifact('macos', 'ios'),
+ # TODO(https://github.com/grpc/grpc/issues/20283)
+ # Add manylinux2010_x86 targets once this issue is resolved.
+ PythonArtifact('manylinux2010', 'x86', 'cp27-cp27m'),
+ PythonArtifact('manylinux2010', 'x86', 'cp27-cp27mu'),
+ PythonArtifact('manylinux2010', 'x86', 'cp35-cp35m'),
+ PythonArtifact('manylinux2010', 'x86', 'cp36-cp36m'),
+ PythonArtifact('manylinux2010', 'x86', 'cp37-cp37m'),
+ PythonArtifact('manylinux2010', 'x86', 'cp38-cp38'),
+ PythonArtifact('linux_extra', 'armv7', '2.7'),
+ PythonArtifact('linux_extra', 'armv7', '3.5'),
+ PythonArtifact('linux_extra', 'armv7', '3.6'),
+ PythonArtifact('linux_extra', 'armv6', '2.7'),
+ PythonArtifact('linux_extra', 'armv6', '3.5'),
+ PythonArtifact('linux_extra', 'armv6', '3.6'),
+ PythonArtifact('manylinux2010', 'x64', 'cp27-cp27m'),
+ PythonArtifact('manylinux2010', 'x64', 'cp27-cp27mu'),
+ PythonArtifact('manylinux2010', 'x64', 'cp35-cp35m'),
+ PythonArtifact('manylinux2010', 'x64', 'cp36-cp36m'),
+ PythonArtifact('manylinux2010', 'x64', 'cp37-cp37m'),
+ PythonArtifact('manylinux2010', 'x64', 'cp38-cp38'),
+ PythonArtifact('macos', 'x64', 'python2.7'),
+ PythonArtifact('macos', 'x64', 'python3.5'),
+ PythonArtifact('macos', 'x64', 'python3.6'),
+ PythonArtifact('macos', 'x64', 'python3.7'),
+ PythonArtifact('macos', 'x64', 'python3.8'),
+ PythonArtifact('windows', 'x86', 'Python27_32bit'),
+ PythonArtifact('windows', 'x86', 'Python35_32bit'),
+ PythonArtifact('windows', 'x86', 'Python36_32bit'),
+ PythonArtifact('windows', 'x86', 'Python37_32bit'),
+ PythonArtifact('windows', 'x86', 'Python38_32bit'),
+ PythonArtifact('windows', 'x64', 'Python27'),
+ PythonArtifact('windows', 'x64', 'Python35'),
+ PythonArtifact('windows', 'x64', 'Python36'),
+ PythonArtifact('windows', 'x64', 'Python37'),
+ PythonArtifact('windows', 'x64', 'Python38'),
+ RubyArtifact('linux', 'x64'),
+ RubyArtifact('macos', 'x64'),
+ PHPArtifact('linux', 'x64')
+ ])
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_csharp.bat b/grpc/tools/run_tests/artifacts/build_artifact_csharp.bat
new file mode 100644
index 00000000..713e480f
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_csharp.bat
@@ -0,0 +1,50 @@
+@rem Copyright 2016 gRPC authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Builds C# artifacts on Windows
+
+set ARCHITECTURE=%1
+
+@rem enter repo root
+cd /d %~dp0\..\..\..
+
+mkdir cmake
+cd cmake
+mkdir build
+cd build
+mkdir %ARCHITECTURE%
+cd %ARCHITECTURE%
+
+@rem TODO(jtattermusch): is there a better way to force using MSVC?
+@rem select the MSVC compiler explicitly to avoid using gcc from mingw or cygwin
+@rem (both are on path)
+set "MSVC_COMPILER=C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/cl.exe"
+if "%ARCHITECTURE%" == "x64" (
+ set "MSVC_COMPILER=C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64/cl.exe"
+)
+
+call "%VS140COMNTOOLS%..\..\VC\vcvarsall.bat" %ARCHITECTURE%
+cmake -G Ninja -DCMAKE_C_COMPILER="%MSVC_COMPILER%" -DCMAKE_CXX_COMPILER="%MSVC_COMPILER%" -DCMAKE_BUILD_TYPE=RelWithDebInfo -DgRPC_BUILD_TESTS=OFF -DgRPC_MSVC_STATIC_RUNTIME=ON ../../.. || goto :error
+cmake --build . --target grpc_csharp_ext
+cd ..\..\..
+
+mkdir %ARTIFACTS_OUT%
+copy /Y cmake\build\%ARCHITECTURE%\grpc_csharp_ext.dll %ARTIFACTS_OUT% || goto :error
+copy /Y cmake\build\%ARCHITECTURE%\grpc_csharp_ext.pdb %ARTIFACTS_OUT% || goto :error
+
+goto :EOF
+
+:error
+echo Failed!
+exit /b %errorlevel%
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_csharp.sh b/grpc/tools/run_tests/artifacts/build_artifact_csharp.sh
new file mode 100755
index 00000000..bb8a91b5
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_csharp.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+mkdir -p cmake/build
+cd cmake/build
+
+cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo \
+ -DgRPC_BACKWARDS_COMPATIBILITY_MODE=ON \
+ -DgRPC_BUILD_TESTS=OFF \
+ "${CMAKE_ARCH_OPTION}" \
+ ../..
+
+make grpc_csharp_ext -j2
+cd ../..
+
+mkdir -p "${ARTIFACTS_OUT}"
+cp cmake/build/libgrpc_csharp_ext.so "${ARTIFACTS_OUT}" || cp cmake/build/libgrpc_csharp_ext.dylib "${ARTIFACTS_OUT}"
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_csharp_android.sh b/grpc/tools/run_tests/artifacts/build_artifact_csharp_android.sh
new file mode 100755
index 00000000..067eb30e
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_csharp_android.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+# ANDROID_ABI is set by the job definition in artifact_targets.py
+src/csharp/experimental/build_native_ext_for_android.sh
+
+mkdir -p "${ARTIFACTS_OUT}"
+cp cmake/build/libgrpc_csharp_ext.so "${ARTIFACTS_OUT}"
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_csharp_ios.sh b/grpc/tools/run_tests/artifacts/build_artifact_csharp_ios.sh
new file mode 100755
index 00000000..c902a45d
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_csharp_ios.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+src/csharp/experimental/build_native_ext_for_ios.sh
+
+mkdir -p "${ARTIFACTS_OUT}"
+cp libs/ios/libgrpc_csharp_ext.a libs/ios/libgrpc.a "${ARTIFACTS_OUT}"
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_php.sh b/grpc/tools/run_tests/artifacts/build_artifact_php.sh
new file mode 100755
index 00000000..9372dc9b
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_php.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+mkdir -p "${ARTIFACTS_OUT}"
+
+pear package
+
+cp -r grpc-*.tgz "${ARTIFACTS_OUT}"/
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_protoc.bat b/grpc/tools/run_tests/artifacts/build_artifact_protoc.bat
new file mode 100644
index 00000000..1d5bf034
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_protoc.bat
@@ -0,0 +1,36 @@
+@rem Copyright 2016 gRPC authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+mkdir %ARTIFACTS_OUT%
+
+@rem enter repo root
+cd /d %~dp0\..\..\..
+
+mkdir cmake
+cd cmake
+mkdir build
+cd build
+
+cmake -G "%generator%" -DgRPC_BUILD_TESTS=OFF -DgRPC_MSVC_STATIC_RUNTIME=ON ../.. || goto :error
+cmake --build . --target protoc --config Release || goto :error
+cmake --build . --target plugins --config Release || goto :error
+cd ..\..
+
+xcopy /Y cmake\build\third_party\protobuf\Release\protoc.exe %ARTIFACTS_OUT%\ || goto :error
+xcopy /Y cmake\build\Release\*_plugin.exe %ARTIFACTS_OUT%\ || goto :error
+
+goto :EOF
+
+:error
+exit /b 1
\ No newline at end of file
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_protoc.sh b/grpc/tools/run_tests/artifacts/build_artifact_protoc.sh
new file mode 100755
index 00000000..a5b6e2f3
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_protoc.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use devtoolset environment that has GCC 4.8 before set -ex
+# shellcheck disable=SC1091
+source scl_source enable devtoolset-2
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+make plugins
+
+mkdir -p "${ARTIFACTS_OUT}"
+cp bins/opt/protobuf/protoc bins/opt/*_plugin "${ARTIFACTS_OUT}"/
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_python.bat b/grpc/tools/run_tests/artifacts/build_artifact_python.bat
new file mode 100644
index 00000000..82d7a48f
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_python.bat
@@ -0,0 +1,61 @@
+@rem Copyright 2016 gRPC authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem set path to python & mingw compiler
+set PATH=C:\%1;C:\%1\scripts;C:\msys64\mingw%2\bin;C:\tools\msys64\mingw%2\bin;%PATH%
+
+python -m pip install --upgrade six
+@rem some artifacts are broken for setuptools 38.5.0. See https://github.com/grpc/grpc/issues/14317
+python -m pip install --upgrade setuptools==38.2.4
+python -m pip install --upgrade cython
+python -m pip install -rrequirements.txt --user
+
+set GRPC_PYTHON_BUILD_WITH_CYTHON=1
+
+@rem Allow build_ext to build C/C++ files in parallel
+@rem by enabling a monkeypatch. It speeds up the build a lot.
+set GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS=2
+
+mkdir %ARTIFACTS_OUT%
+set ARTIFACT_DIR=%cd%\%ARTIFACTS_OUT%
+
+@rem Set up gRPC Python tools
+python tools\distrib\python\make_grpcio_tools.py
+
+@rem Build gRPC Python extensions
+python setup.py build_ext -c %EXT_COMPILER% || goto :error
+
+pushd tools\distrib\python\grpcio_tools
+python setup.py build_ext -c %EXT_COMPILER% || goto :error
+popd
+
+@rem Build gRPC Python distributions
+python setup.py bdist_wheel || goto :error
+
+pushd tools\distrib\python\grpcio_tools
+python setup.py bdist_wheel || goto :error
+popd
+
+@rem Ensure the generated artifacts are valid.
+python -m pip install "twine<=2.0"
+python -m twine check dist\* tools\distrib\python\grpcio_tools\dist\* || goto :error
+
+xcopy /Y /I /S dist\* %ARTIFACT_DIR% || goto :error
+xcopy /Y /I /S tools\distrib\python\grpcio_tools\dist\* %ARTIFACT_DIR% || goto :error
+
+goto :EOF
+
+:error
+popd
+exit /b 1
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_python.sh b/grpc/tools/run_tests/artifacts/build_artifact_python.sh
new file mode 100755
index 00000000..34907837
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_python.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+export GRPC_PYTHON_BUILD_WITH_CYTHON=1
+export PYTHON=${PYTHON:-python}
+export PIP=${PIP:-pip}
+export AUDITWHEEL=${AUDITWHEEL:-auditwheel}
+
+# Install Cython to avoid source wheel build failure.
+"${PIP}" install --upgrade cython
+
+# Allow build_ext to build C/C++ files in parallel
+# by enabling a monkeypatch. It speeds up the build a lot.
+# Use externally provided GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS value if set.
+export GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS=${GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS:-2}
+
+mkdir -p "${ARTIFACTS_OUT}"
+ARTIFACT_DIR="$PWD/${ARTIFACTS_OUT}"
+
+# Build the source distribution first because MANIFEST.in cannot override
+# exclusion of built shared objects among package resources (for some
+# inexplicable reason).
+${SETARCH_CMD} "${PYTHON}" setup.py sdist
+
+# Wheel has a bug where directories don't get excluded.
+# https://bitbucket.org/pypa/wheel/issues/99/cannot-exclude-directory
+${SETARCH_CMD} "${PYTHON}" setup.py bdist_wheel
+
+GRPCIO_STRIP_TEMPDIR=$(mktemp -d)
+GRPCIO_TAR_GZ_LIST=( dist/grpcio-*.tar.gz )
+GRPCIO_TAR_GZ=${GRPCIO_TAR_GZ_LIST[0]}
+GRPCIO_STRIPPED_TAR_GZ=$(mktemp -t "XXXXXXXXXX.tar.gz")
+
+clean_non_source_files() {
+( cd "$1"
+ find . -type f \
+ | grep -v '\.c$' | grep -v '\.cc$' | grep -v '\.cpp$' \
+ | grep -v '\.h$' | grep -v '\.hh$' | grep -v '\.inc$' \
+ | grep -v '\.s$' | grep -v '\.py$' \
+ | while read -r file; do
+ rm -f "$file" || true
+ done
+ find . -type d -empty -delete
+)
+}
+
+tar xzf "${GRPCIO_TAR_GZ}" -C "${GRPCIO_STRIP_TEMPDIR}"
+( cd "${GRPCIO_STRIP_TEMPDIR}"
+ find . -type d -name .git -exec rm -fr {} \; || true
+ for dir in */third_party/*; do
+ clean_non_source_files "${dir}" || true
+ done
+ tar czf "${GRPCIO_STRIPPED_TAR_GZ}" -- *
+)
+mv "${GRPCIO_STRIPPED_TAR_GZ}" "${GRPCIO_TAR_GZ}"
+
+# Build gRPC tools package distribution
+"${PYTHON}" tools/distrib/python/make_grpcio_tools.py
+
+# Build gRPC tools package source distribution
+${SETARCH_CMD} "${PYTHON}" tools/distrib/python/grpcio_tools/setup.py sdist
+
+# Build gRPC tools package binary distribution
+${SETARCH_CMD} "${PYTHON}" tools/distrib/python/grpcio_tools/setup.py bdist_wheel
+
+if [ "$GRPC_BUILD_MANYLINUX_WHEEL" != "" ]
+then
+ for wheel in dist/*.whl; do
+ "${AUDITWHEEL}" show "$wheel" | tee /dev/stderr | grep -E -w "$AUDITWHEEL_PLAT"
+ "${AUDITWHEEL}" repair "$wheel" -w "$ARTIFACT_DIR"
+ rm "$wheel"
+ done
+ for wheel in tools/distrib/python/grpcio_tools/dist/*.whl; do
+ "${AUDITWHEEL}" show "$wheel" | tee /dev/stderr | grep -E -w "$AUDITWHEEL_PLAT"
+ "${AUDITWHEEL}" repair "$wheel" -w "$ARTIFACT_DIR"
+ rm "$wheel"
+ done
+fi
+
+# We need to use the built grpcio-tools/grpcio to compile the health proto
+# Wheels are not supported by setup_requires/dependency_links, so we
+# manually install the dependency. Note we should only do this if we
+# are in a docker image or in a virtualenv.
+if [ "$GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS" != "" ]
+then
+ "${PIP}" install -rrequirements.txt
+
+ if [ "$("$PYTHON" -c "import sys; print(sys.version_info[0])")" == "2" ]
+ then
+ "${PIP}" install futures>=2.2.0
+ fi
+
+ "${PIP}" install grpcio --no-index --find-links "file://$ARTIFACT_DIR/"
+ "${PIP}" install grpcio-tools --no-index --find-links "file://$ARTIFACT_DIR/"
+
+ # Build grpcio_testing source distribution
+ ${SETARCH_CMD} "${PYTHON}" src/python/grpcio_testing/setup.py preprocess \
+ sdist
+ cp -r src/python/grpcio_testing/dist/* "$ARTIFACT_DIR"
+
+ # Build grpcio_channelz source distribution
+ ${SETARCH_CMD} "${PYTHON}" src/python/grpcio_channelz/setup.py \
+ preprocess build_package_protos sdist
+ cp -r src/python/grpcio_channelz/dist/* "$ARTIFACT_DIR"
+
+ # Build grpcio_health_checking source distribution
+ ${SETARCH_CMD} "${PYTHON}" src/python/grpcio_health_checking/setup.py \
+ preprocess build_package_protos sdist
+ cp -r src/python/grpcio_health_checking/dist/* "$ARTIFACT_DIR"
+
+ # Build grpcio_reflection source distribution
+ ${SETARCH_CMD} "${PYTHON}" src/python/grpcio_reflection/setup.py \
+ preprocess build_package_protos sdist
+ cp -r src/python/grpcio_reflection/dist/* "$ARTIFACT_DIR"
+
+ # Build grpcio_status source distribution
+ ${SETARCH_CMD} "${PYTHON}" src/python/grpcio_status/setup.py \
+ preprocess sdist
+ cp -r src/python/grpcio_status/dist/* "$ARTIFACT_DIR"
+fi
+
+# Ensure the generated artifacts are valid.
+"${PYTHON}" -m virtualenv venv || { "${PYTHON}" -m pip install virtualenv==16.7.9 && "${PYTHON}" -m virtualenv venv; }
+venv/bin/python -m pip install "twine<=2.0"
+venv/bin/python -m twine check dist/* tools/distrib/python/grpcio_tools/dist/*
+rm -rf venv/
+
+cp -r dist/* "$ARTIFACT_DIR"
+cp -r tools/distrib/python/grpcio_tools/dist/* "$ARTIFACT_DIR"
diff --git a/grpc/tools/run_tests/artifacts/build_artifact_ruby.sh b/grpc/tools/run_tests/artifacts/build_artifact_ruby.sh
new file mode 100755
index 00000000..b0eecf25
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_artifact_ruby.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -ex
+
+SYSTEM=$(uname | cut -f 1 -d_)
+
+cd "$(dirname "$0")/../../.."
+set +ex
+# shellcheck disable=SC1091
+[[ -s /etc/profile.d/rvm.sh ]] && . /etc/profile.d/rvm.sh
+# shellcheck disable=SC1090
+[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
+set -ex
+
+if [ "$SYSTEM" == "MSYS" ] ; then
+ SYSTEM=MINGW32
+fi
+if [ "$SYSTEM" == "MINGW64" ] ; then
+ SYSTEM=MINGW32
+fi
+
+if [ "$SYSTEM" == "MINGW32" ] ; then
+ echo "Need Linux to build the Windows ruby gem."
+ exit 1
+fi
+
+set +ex
+
+# To work around the problem with bundler 2.1.0 and rubygems-bundler 1.4.5
+# https://github.com/bundler/bundler/issues/7488
+rvm @global
+gem uninstall rubygems-bundler
+
+rvm use default
+gem install bundler -v 1.17.3
+
+tools/run_tests/helper_scripts/bundle_install_wrapper.sh
+
+set -ex
+
+export DOCKERHUB_ORGANIZATION=grpctesting
+rake gem:native
+
+if [ "$SYSTEM" == "Darwin" ] ; then
+ # TODO: consider rewriting this to pass shellcheck
+ # shellcheck disable=SC2046,SC2010
+ rm $(ls pkg/*.gem | grep -v darwin)
+fi
+
+mkdir -p "${ARTIFACTS_OUT}"
+
+cp pkg/*.gem "${ARTIFACTS_OUT}"/
diff --git a/grpc/tools/run_tests/artifacts/build_package_php.sh b/grpc/tools/run_tests/artifacts/build_package_php.sh
new file mode 100755
index 00000000..e263e09a
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_package_php.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+# All the PHP packages have been built in the artifact phase already
+# and we only collect them here to deliver them to the distribtest phase.
+mkdir -p artifacts/
+cp -r "${EXTERNAL_GIT_ROOT}"/input_artifacts/php_*/* artifacts/ || true
diff --git a/grpc/tools/run_tests/artifacts/build_package_python.sh b/grpc/tools/run_tests/artifacts/build_package_python.sh
new file mode 100755
index 00000000..35c78e9a
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_package_python.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+mkdir -p artifacts/
+
+# All the python packages have been built in the artifact phase already
+# and we only collect them here to deliver them to the distribtest phase.
+cp -r "${EXTERNAL_GIT_ROOT}"/input_artifacts/python_*/* artifacts/ || true
+
+apt-get install -y python-pip
+python -m pip install -U pip
+python -m pip install -U wheel
+
+strip_binary_wheel() {
+ WHEEL_PATH="$1"
+ TEMP_WHEEL_DIR=$(mktemp -d)
+ python -m wheel unpack "$WHEEL_PATH" -d "$TEMP_WHEEL_DIR"
+ find "$TEMP_WHEEL_DIR" -name "_protoc_compiler*.so" -exec strip --strip-debug {} ";"
+ find "$TEMP_WHEEL_DIR" -name "cygrpc*.so" -exec strip --strip-debug {} ";"
+
+ WHEEL_FILE=$(basename "$WHEEL_PATH")
+ DISTRIBUTION_NAME=$(basename "$WHEEL_PATH" | cut -d '-' -f 1)
+ VERSION=$(basename "$WHEEL_PATH" | cut -d '-' -f 2)
+ python -m wheel pack "$TEMP_WHEEL_DIR/$DISTRIBUTION_NAME-$VERSION" -d "$TEMP_WHEEL_DIR"
+ mv "$TEMP_WHEEL_DIR/$WHEEL_FILE" "$WHEEL_PATH"
+}
+
+for wheel in artifacts/*.whl; do
+ strip_binary_wheel "$wheel"
+done
+
+# TODO: all the artifact builder configurations generate a grpcio-VERSION.tar.gz
+# source distribution package, and only one of them will end up
+# in the artifacts/ directory. They should be all equivalent though.
diff --git a/grpc/tools/run_tests/artifacts/build_package_ruby.sh b/grpc/tools/run_tests/artifacts/build_package_ruby.sh
new file mode 100755
index 00000000..bd62ec1f
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/build_package_ruby.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+base=$(pwd)
+
+mkdir -p artifacts/
+
+# All the ruby packages have been built in the artifact phase already
+# and we only collect them here to deliver them to the distribtest phase.
+cp -r "${EXTERNAL_GIT_ROOT}"/input_artifacts/ruby_native_gem_*/* artifacts/ || true
+
+well_known_protos=( any api compiler/plugin descriptor duration empty field_mask source_context struct timestamp type wrappers )
+
+# TODO: all the artifact builder configurations generate a grpc-VERSION.gem
+# source distribution package, and only one of them will end up
+# in the artifacts/ directory. They should be all equivalent though.
+
+for arch in {x86,x64}; do
+ case $arch in
+ x64)
+ ruby_arch=x86_64
+ ;;
+ *)
+ ruby_arch=$arch
+ ;;
+ esac
+ for plat in {windows,linux,macos}; do
+ input_dir="${EXTERNAL_GIT_ROOT}/input_artifacts/protoc_${plat}_${arch}"
+ output_dir="$base/src/ruby/tools/bin/${ruby_arch}-${plat}"
+ mkdir -p "$output_dir"/google/protobuf
+ mkdir -p "$output_dir"/google/protobuf/compiler # needed for plugin.proto
+ cp "$input_dir"/protoc* "$input_dir"/grpc_ruby_plugin* "$output_dir/"
+ if [[ "$plat" != "windows" ]]
+ then
+ chmod +x "$output_dir/protoc" "$output_dir/grpc_ruby_plugin"
+ fi
+ for proto in "${well_known_protos[@]}"; do
+ cp "$base/third_party/protobuf/src/google/protobuf/$proto.proto" "$output_dir/google/protobuf/$proto.proto"
+ done
+ done
+done
+
+cd "$base/src/ruby/tools"
+gem build grpc-tools.gemspec
+cp ./grpc-tools*.gem "$base/artifacts/"
diff --git a/grpc/tools/run_tests/artifacts/distribtest_targets.py b/grpc/tools/run_tests/artifacts/distribtest_targets.py
new file mode 100644
index 00000000..7147f46d
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/distribtest_targets.py
@@ -0,0 +1,352 @@
+#!/usr/bin/env python
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Definition of targets run distribution package tests."""
+
+import os.path
+import sys
+
+sys.path.insert(0, os.path.abspath('..'))
+import python_utils.jobset as jobset
+
+
+def create_docker_jobspec(name,
+ dockerfile_dir,
+ shell_command,
+ environ={},
+ flake_retries=0,
+ timeout_retries=0,
+ copy_rel_path=None,
+ timeout_seconds=30 * 60):
+ """Creates jobspec for a task running under docker."""
+ environ = environ.copy()
+ environ['RUN_COMMAND'] = shell_command
+ # the entire repo will be cloned if copy_rel_path is not set.
+ if copy_rel_path:
+ environ['RELATIVE_COPY_PATH'] = copy_rel_path
+
+ docker_args = []
+ for k, v in environ.items():
+ docker_args += ['-e', '%s=%s' % (k, v)]
+ docker_env = {
+ 'DOCKERFILE_DIR': dockerfile_dir,
+ 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
+ }
+ jobspec = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+ docker_args,
+ environ=docker_env,
+ shortname='distribtest.%s' % (name),
+ timeout_seconds=timeout_seconds,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries)
+ return jobspec
+
+
+def create_jobspec(name,
+ cmdline,
+ environ=None,
+ shell=False,
+ flake_retries=0,
+ timeout_retries=0,
+ use_workspace=False,
+ timeout_seconds=10 * 60):
+ """Creates jobspec."""
+    environ = environ.copy() if environ is not None else {}
+ if use_workspace:
+ environ['WORKSPACE_NAME'] = 'workspace_%s' % name
+ cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
+ ] + cmdline
+ jobspec = jobset.JobSpec(cmdline=cmdline,
+ environ=environ,
+ shortname='distribtest.%s' % (name),
+ timeout_seconds=timeout_seconds,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries,
+ shell=shell)
+ return jobspec
+
+
+class CSharpDistribTest(object):
+ """Tests C# NuGet package"""
+
+ def __init__(self, platform, arch, docker_suffix=None,
+ use_dotnet_cli=False):
+ self.name = 'csharp_%s_%s' % (platform, arch)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.labels = ['distribtest', 'csharp', platform, arch]
+ self.script_suffix = ''
+ if docker_suffix:
+ self.name += '_%s' % docker_suffix
+ self.labels.append(docker_suffix)
+ if use_dotnet_cli:
+ self.name += '_dotnetcli'
+ self.script_suffix = '_dotnetcli'
+ self.labels.append('dotnetcli')
+ else:
+ self.labels.append('olddotnet')
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/csharp_%s_%s' %
+ (self.docker_suffix, self.arch),
+ 'test/distrib/csharp/run_distrib_test%s.sh' %
+ self.script_suffix,
+ copy_rel_path='test/distrib')
+ elif self.platform == 'macos':
+ return create_jobspec(self.name, [
+ 'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix
+ ],
+ environ={'EXTERNAL_GIT_ROOT': '../../../..'},
+ use_workspace=True)
+ elif self.platform == 'windows':
+ if self.arch == 'x64':
+ # Use double leading / as the first occurrence gets removed by msys bash
+ # when invoking the .bat file (side-effect of posix path conversion)
+ environ = {
+ 'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
+ 'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
+ }
+ else:
+ environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
+ return create_jobspec(self.name, [
+ 'test\\distrib\\csharp\\run_distrib_test%s.bat' %
+ self.script_suffix
+ ],
+ environ=environ,
+ use_workspace=True)
+ else:
+ raise Exception("Not supported yet.")
+
+ def __str__(self):
+ return self.name
+
+
+class PythonDistribTest(object):
+ """Tests Python package"""
+
+ def __init__(self, platform, arch, docker_suffix, source=False):
+ self.source = source
+ if source:
+ self.name = 'python_dev_%s_%s_%s' % (platform, arch, docker_suffix)
+ else:
+ self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if not self.platform == 'linux':
+ raise Exception("Not supported yet.")
+
+ if self.source:
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/python_dev_%s_%s' %
+ (self.docker_suffix, self.arch),
+ 'test/distrib/python/run_source_distrib_test.sh',
+ copy_rel_path='test/distrib')
+ else:
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/python_%s_%s' %
+ (self.docker_suffix, self.arch),
+ 'test/distrib/python/run_binary_distrib_test.sh',
+ copy_rel_path='test/distrib')
+
+ def __str__(self):
+ return self.name
+
+
+class RubyDistribTest(object):
+ """Tests Ruby package"""
+
+ def __init__(self, platform, arch, docker_suffix, ruby_version=None):
+ self.name = 'ruby_%s_%s_%s_version_%s' % (platform, arch, docker_suffix,
+ ruby_version or 'unspecified')
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.ruby_version = ruby_version
+ self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ arch_to_gem_arch = {
+ 'x64': 'x86_64',
+ 'x86': 'x86',
+ }
+ if not self.platform == 'linux':
+ raise Exception("Not supported yet.")
+
+ dockerfile_name = 'tools/dockerfile/distribtest/ruby_%s_%s' % (
+ self.docker_suffix, self.arch)
+ if self.ruby_version is not None:
+ dockerfile_name += '_%s' % self.ruby_version
+ return create_docker_jobspec(
+ self.name,
+ dockerfile_name,
+ 'test/distrib/ruby/run_distrib_test.sh %s %s' %
+ (arch_to_gem_arch[self.arch], self.platform),
+ copy_rel_path='test/distrib')
+
+ def __str__(self):
+ return self.name
+
+
+class PHPDistribTest(object):
+ """Tests PHP package"""
+
+ def __init__(self, platform, arch, docker_suffix=None):
+ self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/php_%s_%s' %
+ (self.docker_suffix, self.arch),
+ 'test/distrib/php/run_distrib_test.sh',
+ copy_rel_path='test/distrib')
+ elif self.platform == 'macos':
+ return create_jobspec(
+ self.name, ['test/distrib/php/run_distrib_test_macos.sh'],
+ environ={'EXTERNAL_GIT_ROOT': '../../../..'},
+ use_workspace=True)
+ else:
+ raise Exception("Not supported yet.")
+
+ def __str__(self):
+ return self.name
+
+
+class CppDistribTest(object):
+ """Tests Cpp make install by building examples."""
+
+ def __init__(self, platform, arch, docker_suffix=None, testcase=None):
+ if platform == 'linux':
+ self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix,
+ testcase)
+ else:
+ self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
+ self.platform = platform
+ self.arch = arch
+ self.docker_suffix = docker_suffix
+ self.testcase = testcase
+ self.labels = [
+ 'distribtest', 'cpp', platform, arch, docker_suffix, testcase
+ ]
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.platform == 'linux':
+ return create_docker_jobspec(
+ self.name,
+ 'tools/dockerfile/distribtest/cpp_%s_%s' %
+ (self.docker_suffix, self.arch),
+ 'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase,
+ timeout_seconds=45 * 60)
+ elif self.platform == 'windows':
+ return create_jobspec(
+ self.name,
+ ['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
+ environ={},
+ timeout_seconds=30 * 60,
+ use_workspace=True)
+ else:
+ raise Exception("Not supported yet.")
+
+ def __str__(self):
+ return self.name
+
+
+def targets():
+ """Gets list of supported targets"""
+ return [
+ CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
+ CppDistribTest('linux', 'x64', 'jessie', 'cmake_as_submodule'),
+ CppDistribTest('linux', 'x64', 'stretch', 'cmake'),
+ CppDistribTest('linux', 'x64', 'stretch', 'cmake_as_externalproject'),
+ CppDistribTest('linux', 'x64', 'stretch', 'cmake_fetchcontent'),
+ CppDistribTest('linux', 'x64', 'stretch', 'cmake_module_install'),
+ CppDistribTest('linux', 'x64', 'stretch',
+ 'cmake_module_install_pkgconfig'),
+ CppDistribTest('linux', 'x64', 'stretch', 'cmake_pkgconfig'),
+ CppDistribTest('linux', 'x64', 'stretch', 'raspberry_pi'),
+ CppDistribTest('windows', 'x86', testcase='cmake'),
+ CppDistribTest('windows', 'x86', testcase='cmake_as_externalproject'),
+ CSharpDistribTest('linux', 'x64', 'jessie'),
+ CSharpDistribTest('linux', 'x86', 'jessie'),
+ CSharpDistribTest('linux', 'x64', 'stretch'),
+ CSharpDistribTest('linux', 'x64', 'stretch', use_dotnet_cli=True),
+ CSharpDistribTest('linux', 'x64', 'centos7'),
+ CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
+ CSharpDistribTest('linux', 'x64', 'ubuntu1604', use_dotnet_cli=True),
+ CSharpDistribTest('linux', 'x64', 'alpine', use_dotnet_cli=True),
+ CSharpDistribTest('macos', 'x86'),
+ CSharpDistribTest('windows', 'x86'),
+ CSharpDistribTest('windows', 'x64'),
+ PythonDistribTest('linux', 'x64', 'jessie'),
+ PythonDistribTest('linux', 'x86', 'jessie'),
+ PythonDistribTest('linux', 'x64', 'centos6'),
+ PythonDistribTest('linux', 'x64', 'centos7'),
+ PythonDistribTest('linux', 'x64', 'fedora23'),
+ PythonDistribTest('linux', 'x64', 'opensuse'),
+ PythonDistribTest('linux', 'x64', 'arch'),
+ PythonDistribTest('linux', 'x64', 'ubuntu1404'),
+ PythonDistribTest('linux', 'x64', 'ubuntu1604'),
+ PythonDistribTest('linux', 'x64', 'alpine3.7', source=True),
+ PythonDistribTest('linux', 'x64', 'jessie', source=True),
+ PythonDistribTest('linux', 'x86', 'jessie', source=True),
+ PythonDistribTest('linux', 'x64', 'centos7', source=True),
+ PythonDistribTest('linux', 'x64', 'fedora23', source=True),
+ PythonDistribTest('linux', 'x64', 'arch', source=True),
+ PythonDistribTest('linux', 'x64', 'ubuntu1404', source=True),
+ PythonDistribTest('linux', 'x64', 'ubuntu1604', source=True),
+ RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_3'),
+ RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_4'),
+ RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_5'),
+ RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_6'),
+ RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_7'),
+ RubyDistribTest('linux', 'x64', 'centos6'),
+ RubyDistribTest('linux', 'x64', 'centos7'),
+ RubyDistribTest('linux', 'x64', 'fedora23'),
+ RubyDistribTest('linux', 'x64', 'opensuse'),
+ RubyDistribTest('linux', 'x64', 'ubuntu1404'),
+ RubyDistribTest('linux', 'x64', 'ubuntu1604'),
+ PHPDistribTest('linux', 'x64', 'jessie'),
+ PHPDistribTest('macos', 'x64'),
+ ]
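+
+
+# Example (hypothetical, for illustration only): a driver script could
+# filter the targets above by label and collect their jobspecs like this:
+#
+#   linux_targets = [t for t in targets() if 'linux' in t.labels]
+#   jobs = [t.build_jobspec() for t in linux_targets]
+#
+# Each resulting jobspec is then run either directly or under docker,
+# depending on the target's platform.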
diff --git a/grpc/tools/run_tests/artifacts/package_targets.py b/grpc/tools/run_tests/artifacts/package_targets.py
new file mode 100644
index 00000000..9a52bec1
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/package_targets.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Definition of targets to build distribution packages."""
+
+import os.path
+import sys
+
+sys.path.insert(0, os.path.abspath('..'))
+import python_utils.jobset as jobset
+
+
+def create_docker_jobspec(name,
+ dockerfile_dir,
+ shell_command,
+ environ={},
+ flake_retries=0,
+ timeout_retries=0):
+ """Creates jobspec for a task running under docker."""
+ environ = environ.copy()
+ environ['RUN_COMMAND'] = shell_command
+
+ docker_args = []
+ for k, v in environ.items():
+ docker_args += ['-e', '%s=%s' % (k, v)]
+ docker_env = {
+ 'DOCKERFILE_DIR': dockerfile_dir,
+ 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
+ 'OUTPUT_DIR': 'artifacts'
+ }
+ jobspec = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+ docker_args,
+ environ=docker_env,
+ shortname='build_package.%s' % (name),
+ timeout_seconds=30 * 60,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries)
+ return jobspec
+
+
+def create_jobspec(name,
+ cmdline,
+ environ=None,
+ cwd=None,
+ shell=False,
+ flake_retries=0,
+ timeout_retries=0,
+ cpu_cost=1.0):
+ """Creates jobspec."""
+ jobspec = jobset.JobSpec(cmdline=cmdline,
+ environ=environ,
+ cwd=cwd,
+ shortname='build_package.%s' % (name),
+ timeout_seconds=10 * 60,
+ flake_retries=flake_retries,
+ timeout_retries=timeout_retries,
+ cpu_cost=cpu_cost,
+ shell=shell)
+ return jobspec
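+
+# For example (hypothetical): create_jobspec('foo', ['build_foo.sh']) yields
+# a JobSpec with shortname 'build_package.foo' and a 10 minute timeout.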
+
+
+class CSharpPackage:
+ """Builds C# packages."""
+
+ def __init__(self, unity=False):
+ self.unity = unity
+ self.labels = ['package', 'csharp', 'windows']
+ if unity:
+ self.name = 'csharp_package_unity_windows'
+ self.labels += ['unity']
+ else:
+ self.name = 'csharp_package_nuget_windows'
+ self.labels += ['nuget']
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ if self.unity:
+ # use very high CPU cost to avoid running nuget package build
+ # and unity build concurrently
+ return create_jobspec(self.name, ['build_unitypackage.bat'],
+ cwd='src\\csharp',
+ cpu_cost=1e6,
+ shell=True)
+ else:
+ return create_jobspec(self.name, ['build_packages_dotnetcli.bat'],
+ cwd='src\\csharp',
+ shell=True)
+
+ def __str__(self):
+ return self.name
+
+
+class RubyPackage:
+ """Collects ruby gems created in the artifact phase"""
+
+ def __init__(self):
+ self.name = 'ruby_package'
+ self.labels = ['package', 'ruby', 'linux']
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ return create_docker_jobspec(
+ self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+ 'tools/run_tests/artifacts/build_package_ruby.sh')
+
+
+class PythonPackage:
+ """Collects python eggs and wheels created in the artifact phase"""
+
+ def __init__(self):
+ self.name = 'python_package'
+ self.labels = ['package', 'python', 'linux']
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ return create_docker_jobspec(
+ self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+ 'tools/run_tests/artifacts/build_package_python.sh')
+
+
+class PHPPackage:
+ """Copy PHP PECL package artifact"""
+
+ def __init__(self):
+ self.name = 'php_package'
+ self.labels = ['package', 'php', 'linux']
+
+ def pre_build_jobspecs(self):
+ return []
+
+ def build_jobspec(self):
+ return create_docker_jobspec(
+ self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+ 'tools/run_tests/artifacts/build_package_php.sh')
+
+
+def targets():
+ """Gets list of supported targets"""
+ return [
+ CSharpPackage(),
+ CSharpPackage(unity=True),
+ RubyPackage(),
+ PythonPackage(),
+ PHPPackage()
+ ]
diff --git a/grpc/tools/run_tests/artifacts/run_in_workspace.sh b/grpc/tools/run_tests/artifacts/run_in_workspace.sh
new file mode 100755
index 00000000..f4719b0a
--- /dev/null
+++ b/grpc/tools/run_tests/artifacts/run_in_workspace.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Creates a workspace in a subdirectory to allow running multiple builds in isolation.
+# The WORKSPACE_NAME env variable must contain the name of the workspace to create.
+# All cmdline args are executed as a single command inside the workspace.
+set -ex
+
+cd "$(dirname "$0")/../../.."
+repo_root=$(pwd)
+export repo_root
+
+# TODO: fix file to pass shellcheck
+
+rm -rf "${WORKSPACE_NAME}"
+git clone . "${WORKSPACE_NAME}"
+# clone gRPC submodules, use data from locally cloned submodules where possible
+# shellcheck disable=SC1004,SC2016
+git submodule foreach 'cd "${repo_root}/${WORKSPACE_NAME}" \
+ && git submodule update --init --reference ${repo_root}/${name} ${name}'
+
+echo "Running in workspace ${WORKSPACE_NAME}"
+cd "${WORKSPACE_NAME}"
+# execute the command passed in, preserving argument boundaries
+"$@"
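+
+# Example invocation (hypothetical): run a distribtest script in an isolated
+# clone of the repo:
+#   WORKSPACE_NAME=workspace_csharp_distribtest \
+#     tools/run_tests/artifacts/run_in_workspace.sh \
+#     test/distrib/csharp/run_distrib_test.sh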
diff --git a/grpc/tools/run_tests/build_stats/build_stats_schema.json b/grpc/tools/run_tests/build_stats/build_stats_schema.json
new file mode 100644
index 00000000..021a3495
--- /dev/null
+++ b/grpc/tools/run_tests/build_stats/build_stats_schema.json
@@ -0,0 +1,56 @@
+[
+ {
+ "name": "build_number",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "timestamp",
+ "type": "TIMESTAMP",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "matrix",
+ "type": "RECORD",
+ "mode": "REPEATED",
+ "fields": [
+ {
+ "name": "name",
+ "type": "STRING",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "duration",
+ "type": "FLOAT",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "pass_count",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "failure_count",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "error",
+ "type": "RECORD",
+ "mode": "REPEATED",
+ "fields": [
+ {
+ "name": "description",
+ "type": "STRING",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "count",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ }
+ ]
+ }
+ ]
+ }
+]
diff --git a/grpc/tools/run_tests/build_stats/build_stats_schema_no_matrix.json b/grpc/tools/run_tests/build_stats/build_stats_schema_no_matrix.json
new file mode 100644
index 00000000..eeb067d7
--- /dev/null
+++ b/grpc/tools/run_tests/build_stats/build_stats_schema_no_matrix.json
@@ -0,0 +1,49 @@
+[
+ {
+ "name": "build_number",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "timestamp",
+ "type": "TIMESTAMP",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "duration",
+ "type": "FLOAT",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "pass_count",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "failure_count",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "no_report_files_found",
+ "type": "BOOLEAN",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "error",
+ "type": "RECORD",
+ "mode": "REPEATED",
+ "fields": [
+ {
+ "name": "description",
+ "type": "STRING",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "count",
+ "type": "INTEGER",
+ "mode": "NULLABLE"
+ }
+ ]
+ }
+]
diff --git a/grpc/tools/run_tests/dockerize/build_and_run_docker.sh b/grpc/tools/run_tests/dockerize/build_and_run_docker.sh
new file mode 100755
index 00000000..a5ef4284
--- /dev/null
+++ b/grpc/tools/run_tests/dockerize/build_and_run_docker.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Builds docker image and runs a command under it.
+# You should never need to call this script on your own.
+
+# shellcheck disable=SC2103
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+git_root=$(pwd)
+cd -
+
+# Inputs
+# DOCKERFILE_DIR - Directory in which the Dockerfile is located.
+# DOCKER_RUN_SCRIPT - Script to run under docker (relative to grpc repo root)
+# OUTPUT_DIR - Directory that will be copied out of the container after the run finishes.
+# DOCKERHUB_ORGANIZATION - If set, pull a prebuilt image from the given Docker Hub org.
+# DOCKER_BASE_IMAGE - If set, pull this base image first to force an update.
+# $@ - Extra args to pass to docker run
+
+# Derive the image name from the Dockerfile directory name plus a checksum of its contents
+DOCKER_IMAGE_NAME=$(basename "$DOCKERFILE_DIR")_$(sha1sum "$DOCKERFILE_DIR/Dockerfile" | cut -f1 -d\ )
+
+# Pull the base image to force an update
+if [ "$DOCKER_BASE_IMAGE" != "" ]
+then
+ time docker pull "$DOCKER_BASE_IMAGE"
+fi
+
+if [ "$DOCKERHUB_ORGANIZATION" != "" ]
+then
+ DOCKER_IMAGE_NAME=$DOCKERHUB_ORGANIZATION/$DOCKER_IMAGE_NAME
+ time docker pull "$DOCKER_IMAGE_NAME"
+else
+ # Make sure the docker image has been built. Should be nearly instantaneous if it already exists.
+ docker build -t "$DOCKER_IMAGE_NAME" "$DOCKERFILE_DIR"
+fi
+
+# Choose random name for docker container
+CONTAINER_NAME="build_and_run_docker_$(uuidgen)"
+
+# Run command inside docker
+# TODO: use a proper array instead of $EXTRA_DOCKER_ARGS
+# shellcheck disable=SC2086
+docker run \
+ "$@" \
+ --cap-add SYS_PTRACE \
+ -e EXTERNAL_GIT_ROOT="/var/local/jenkins/grpc" \
+ -e THIS_IS_REALLY_NEEDED='see https://github.com/docker/docker/issues/14203 for why docker is awful' \
+ -e "KOKORO_BUILD_ID=$KOKORO_BUILD_ID" \
+ -e "KOKORO_BUILD_NUMBER=$KOKORO_BUILD_NUMBER" \
+ -e "KOKORO_BUILD_URL=$KOKORO_BUILD_URL" \
+ -e "KOKORO_JOB_NAME=$KOKORO_JOB_NAME" \
+ -v "$git_root:/var/local/jenkins/grpc:ro" \
+ -w /var/local/git/grpc \
+ --name="$CONTAINER_NAME" \
+ $EXTRA_DOCKER_ARGS \
+ "$DOCKER_IMAGE_NAME" \
+ /bin/bash -l "/var/local/jenkins/grpc/$DOCKER_RUN_SCRIPT" || FAILED="true"
+
+# Copy output artifacts
+if [ "$OUTPUT_DIR" != "" ]
+then
+ # Create the artifact directory in advance to avoid a race in "docker cp" if tasks
+ # that were running in parallel finish at the same time.
+ # see https://github.com/grpc/grpc/issues/16155
+ mkdir -p "$git_root/$OUTPUT_DIR"
+ docker cp "$CONTAINER_NAME:/var/local/git/grpc/$OUTPUT_DIR" "$git_root" || FAILED="true"
+fi
+
+# remove the container, possibly killing it first
+docker rm -f "$CONTAINER_NAME" || true
+
+if [ "$FAILED" != "" ]
+then
+ exit 1
+fi
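+
+# Example (mirroring how package_targets.py drives this script):
+#   DOCKERFILE_DIR=tools/dockerfile/grpc_artifact_linux_x64 \
+#   DOCKER_RUN_SCRIPT=tools/run_tests/dockerize/docker_run.sh \
+#   OUTPUT_DIR=artifacts \
+#   tools/run_tests/dockerize/build_and_run_docker.sh \
+#     -e RUN_COMMAND=tools/run_tests/artifacts/build_package_ruby.sh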
diff --git a/grpc/tools/run_tests/dockerize/build_docker_and_run_tests.sh b/grpc/tools/run_tests/dockerize/build_docker_and_run_tests.sh
new file mode 100755
index 00000000..aeaae3f9
--- /dev/null
+++ b/grpc/tools/run_tests/dockerize/build_docker_and_run_tests.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This script is invoked by run_tests.py to accommodate the "test under docker"
+# scenario. You should never need to call this script on your own.
+
+# shellcheck disable=SC2103
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+git_root=$(pwd)
+cd -
+
+# Inputs
+# DOCKERFILE_DIR - Directory in which the Dockerfile is located.
+# DOCKER_RUN_SCRIPT - Script to run under docker (relative to grpc repo root)
+# DOCKERHUB_ORGANIZATION - If set, pull a prebuilt image from the given Docker Hub org.
+
+# Derive the image name from the Dockerfile directory name plus a checksum of its contents
+DOCKER_IMAGE_NAME=$(basename "$DOCKERFILE_DIR")_$(sha1sum "$DOCKERFILE_DIR/Dockerfile" | cut -f1 -d\ )
+
+if [ "$DOCKERHUB_ORGANIZATION" != "" ]
+then
+ DOCKER_IMAGE_NAME=$DOCKERHUB_ORGANIZATION/$DOCKER_IMAGE_NAME
+ time docker pull "$DOCKER_IMAGE_NAME"
+else
+ # Make sure the docker image has been built. Should be nearly instantaneous if it already exists.
+ docker build -t "$DOCKER_IMAGE_NAME" "$DOCKERFILE_DIR"
+fi
+
+# Choose random name for docker container
+CONTAINER_NAME="run_tests_$(uuidgen)"
+
+# Git root as seen by the docker instance
+docker_instance_git_root=/var/local/jenkins/grpc
+
+# Run tests inside docker
+DOCKER_EXIT_CODE=0
+# TODO: silence complaint about $TTY_FLAG expansion in some other way
+# shellcheck disable=SC2086,SC2154
+docker run \
+ --cap-add SYS_PTRACE \
+ -e "RUN_TESTS_COMMAND=$RUN_TESTS_COMMAND" \
+ -e "config=$config" \
+ -e "arch=$arch" \
+ -e THIS_IS_REALLY_NEEDED='see https://github.com/docker/docker/issues/14203 for why docker is awful' \
+ -e HOST_GIT_ROOT="$git_root" \
+ -e LOCAL_GIT_ROOT=$docker_instance_git_root \
+ -e "BUILD_ID=$BUILD_ID" \
+ -e "BUILD_URL=$BUILD_URL" \
+ -e "JOB_BASE_NAME=$JOB_BASE_NAME" \
+ -e "KOKORO_BUILD_ID=$KOKORO_BUILD_ID" \
+ -e "KOKORO_BUILD_NUMBER=$KOKORO_BUILD_NUMBER" \
+ -e "KOKORO_BUILD_URL=$KOKORO_BUILD_URL" \
+ -e "KOKORO_JOB_NAME=$KOKORO_JOB_NAME" \
+ -i \
+ $TTY_FLAG \
+ --sysctl net.ipv6.conf.all.disable_ipv6=0 \
+ -v ~/.config/gcloud:/root/.config/gcloud \
+ -v "$git_root:$docker_instance_git_root" \
+ -v /tmp/npm-cache:/tmp/npm-cache \
+ -w /var/local/git/grpc \
+ --name="$CONTAINER_NAME" \
+ "$DOCKER_IMAGE_NAME" \
+ bash -l "/var/local/jenkins/grpc/$DOCKER_RUN_SCRIPT" || DOCKER_EXIT_CODE=$?
+
+# use unique name for reports.zip to prevent clash between concurrent
+# run_tests.py runs
+TEMP_REPORTS_ZIP=$(mktemp)
+docker cp "$CONTAINER_NAME:/var/local/git/grpc/reports.zip" "${TEMP_REPORTS_ZIP}" || true
+unzip -o "${TEMP_REPORTS_ZIP}" -d "$git_root" || true
+rm -f "${TEMP_REPORTS_ZIP}"
+
+# remove the container, possibly killing it first
+docker rm -f "$CONTAINER_NAME" || true
+
+exit $DOCKER_EXIT_CODE
diff --git a/grpc/tools/run_tests/dockerize/build_interop_image.sh b/grpc/tools/run_tests/dockerize/build_interop_image.sh
new file mode 100755
index 00000000..9516c5ad
--- /dev/null
+++ b/grpc/tools/run_tests/dockerize/build_interop_image.sh
@@ -0,0 +1,127 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This script is invoked by run_interop_tests.py to build the docker image
+# for interop testing. You should never need to call this script on your own.
+
+set -ex
+
+# Params:
+# INTEROP_IMAGE - name (and tag) of the final interop image
+# BASE_NAME - base name used to locate the base Dockerfile and build script
+# TTY_FLAG - optional -t flag to make docker allocate a tty
+# BUILD_INTEROP_DOCKER_EXTRA_ARGS - optional args to be passed to the
+# docker run command
+# GRPC_ROOT - grpc base directory, defaults to the top of this tree.
+# GRPC_GO_ROOT - grpc-go base directory, defaults to '$GRPC_ROOT/../grpc-go'
+# GRPC_JAVA_ROOT - grpc-java base directory, defaults to '$GRPC_ROOT/../grpc-java'
+
+cd "$(dirname "$0")/../../.."
+echo "GRPC_ROOT: ${GRPC_ROOT:=$(pwd)}"
+MOUNT_ARGS="-v $GRPC_ROOT:/var/local/jenkins/grpc:ro"
+
+echo "GRPC_JAVA_ROOT: ${GRPC_JAVA_ROOT:=$(cd ../grpc-java && pwd)}"
+if [ -n "$GRPC_JAVA_ROOT" ]
+then
+ MOUNT_ARGS+=" -v $GRPC_JAVA_ROOT:/var/local/jenkins/grpc-java:ro"
+else
+ echo "WARNING: grpc-java not found, it won't be mounted to the docker container."
+fi
+
+echo "GRPC_GO_ROOT: ${GRPC_GO_ROOT:=$(cd ../grpc-go && pwd)}"
+if [ -n "$GRPC_GO_ROOT" ]
+then
+ MOUNT_ARGS+=" -v $GRPC_GO_ROOT:/var/local/jenkins/grpc-go:ro"
+else
+ echo "WARNING: grpc-go not found, it won't be mounted to the docker container."
+fi
+
+echo "GRPC_DART_ROOT: ${GRPC_DART_ROOT:=$(cd ../grpc-dart && pwd)}"
+if [ -n "$GRPC_DART_ROOT" ]
+then
+ MOUNT_ARGS+=" -v $GRPC_DART_ROOT:/var/local/jenkins/grpc-dart:ro"
+else
+ echo "WARNING: grpc-dart not found, it won't be mounted to the docker container."
+fi
+
+echo "GRPC_NODE_ROOT: ${GRPC_NODE_ROOT:=$(cd ../grpc-node && pwd)}"
+if [ -n "$GRPC_NODE_ROOT" ]
+then
+ MOUNT_ARGS+=" -v $GRPC_NODE_ROOT:/var/local/jenkins/grpc-node:ro"
+else
+ echo "WARNING: grpc-node not found, it won't be mounted to the docker container."
+fi
+
+echo "GRPC_DOTNET_ROOT: ${GRPC_DOTNET_ROOT:=$(cd ../grpc-dotnet && pwd)}"
+if [ -n "$GRPC_DOTNET_ROOT" ]
+then
+ MOUNT_ARGS+=" -v $GRPC_DOTNET_ROOT:/var/local/jenkins/grpc-dotnet:ro"
+else
+ echo "WARNING: grpc-dotnet not found, it won't be mounted to the docker container."
+fi
+
+# Mount the service account dir if available.
+# If the directory does not contain the service account JSON file,
+# some of the tests will fail.
+if [ -e "$HOME/service_account" ]
+then
+ MOUNT_ARGS+=" -v $HOME/service_account:/var/local/jenkins/service_account:ro"
+fi
+
+# Use image name based on Dockerfile checksum
+# on macOS, use md5 instead of sha1sum
+if command -v sha1sum > /dev/null;
+then
+ BASE_IMAGE=${BASE_NAME}_$(sha1sum "tools/dockerfile/interoptest/$BASE_NAME/Dockerfile" | cut -f1 -d\ )
+else
+ BASE_IMAGE=${BASE_NAME}_$(md5 -r "tools/dockerfile/interoptest/$BASE_NAME/Dockerfile" | cut -f1 -d\ )
+fi
+
+if [ "$DOCKERHUB_ORGANIZATION" != "" ]
+then
+ BASE_IMAGE=$DOCKERHUB_ORGANIZATION/$BASE_IMAGE
+ time docker pull "$BASE_IMAGE"
+else
+ # Make sure the docker image has been built. Should be nearly instantaneous if it already exists.
+ docker build -t "$BASE_IMAGE" --force-rm=true "tools/dockerfile/interoptest/$BASE_NAME" || exit $?
+fi
+
+CONTAINER_NAME="build_${BASE_NAME}_$(uuidgen)"
+
+# Prepare image for interop tests, commit it on success.
+# TODO: Figure out if it is safe to eliminate the suppression. It's currently here
+# because $MOUNT_ARGS and $BUILD_INTEROP_DOCKER_EXTRA_ARGS can have legitimate
+# spaces, but the "correct" way to do this is to utilize proper arrays.
+# Same for $TTY_FLAG
+# shellcheck disable=SC2086
+(docker run \
+ --cap-add SYS_PTRACE \
+ -e THIS_IS_REALLY_NEEDED='see https://github.com/docker/docker/issues/14203 for why docker is awful' \
+ -e THIS_IS_REALLY_NEEDED_ONCE_AGAIN='For issue 4835. See https://github.com/docker/docker/issues/14203 for why docker is awful' \
+ -i \
+ $TTY_FLAG \
+ $MOUNT_ARGS \
+ $BUILD_INTEROP_DOCKER_EXTRA_ARGS \
+ --name="$CONTAINER_NAME" \
+ "$BASE_IMAGE" \
+ bash -l "/var/local/jenkins/grpc/tools/dockerfile/interoptest/$BASE_NAME/build_interop.sh" \
+ && docker commit "$CONTAINER_NAME" "$INTEROP_IMAGE" \
+ && echo "Successfully built image $INTEROP_IMAGE")
+EXITCODE=$?
+
+# remove intermediate container, possibly killing it first
+docker rm -f "$CONTAINER_NAME"
+
+exit $EXITCODE
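+
+# Example (hypothetical values): build an interop image for Go:
+#   BASE_NAME=grpc_interop_go INTEROP_IMAGE=grpc_interop_go:some_tag \
+#     tools/run_tests/dockerize/build_interop_image.sh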
diff --git a/grpc/tools/run_tests/dockerize/docker_run.sh b/grpc/tools/run_tests/dockerize/docker_run.sh
new file mode 100755
index 00000000..e525019c
--- /dev/null
+++ b/grpc/tools/run_tests/dockerize/docker_run.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This script is invoked by build_docker_* inside a docker
+# container. You should never need to call this script on your own.
+
+set -ex
+
+if [ "$RELATIVE_COPY_PATH" == "" ]
+then
+ mkdir -p /var/local/git
+ git clone "$EXTERNAL_GIT_ROOT" /var/local/git/grpc
+ # clone gRPC submodules, use data from locally cloned submodules where possible
+ # TODO: figure out a way to eliminate this following shellcheck suppressions
+ # shellcheck disable=SC2016,SC1004
+ (cd "${EXTERNAL_GIT_ROOT}" && git submodule foreach 'git clone ${EXTERNAL_GIT_ROOT}/${name} /var/local/git/grpc/${name}')
+ (cd /var/local/git/grpc && git submodule init)
+else
+ mkdir -p "/var/local/git/grpc/$RELATIVE_COPY_PATH"
+ cp -r "$EXTERNAL_GIT_ROOT/$RELATIVE_COPY_PATH"/* "/var/local/git/grpc/$RELATIVE_COPY_PATH"
+fi
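+
+# Note: the distribtest targets set copy_rel_path='test/distrib', which is
+# presumably surfaced here as RELATIVE_COPY_PATH so that only that subtree
+# is copied into the container instead of doing a full git clone.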
+
+$POST_GIT_STEP
+
+if [ -x "$(command -v rvm)" ]
+then
+ rvm use ruby-2.1
+fi
+
+cd /var/local/git/grpc
+
+$RUN_COMMAND
diff --git a/grpc/tools/run_tests/dockerize/docker_run_tests.sh b/grpc/tools/run_tests/dockerize/docker_run_tests.sh
new file mode 100755
index 00000000..2a3b6f53
--- /dev/null
+++ b/grpc/tools/run_tests/dockerize/docker_run_tests.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This script is invoked by build_docker_and_run_tests.sh inside a docker
+# container. You should never need to call this script on your own.
+
+set -e
+
+export CONFIG=${config:-opt}
+export ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer
+export PATH=$PATH:/usr/bin/llvm-symbolizer
+
+mkdir -p /var/local/git
+git clone /var/local/jenkins/grpc /var/local/git/grpc
+# clone gRPC submodules, use data from locally cloned submodules where possible
+# TODO: figure out a way to eliminate this shellcheck suppression:
+# shellcheck disable=SC2016
+(cd /var/local/jenkins/grpc/ && git submodule foreach 'git clone /var/local/jenkins/grpc/${name} /var/local/git/grpc/${name}')
+(cd /var/local/git/grpc/ && git submodule init)
+
+mkdir -p reports
+
+$POST_GIT_STEP
+
+exit_code=0
+
+$RUN_TESTS_COMMAND || exit_code=$?
+
+# The easiest way to copy all the report files from inside the docker
+# container is to zip them and then copy the zip.
+zip -r reports.zip reports
+find . -name report.xml -print0 | xargs -0 -r zip reports.zip
+find . -name sponge_log.xml -print0 | xargs -0 -r zip reports.zip
+find . -name 'report_*.xml' -print0 | xargs -0 -r zip reports.zip
+
+exit $exit_code
diff --git a/grpc/tools/run_tests/helper_scripts/build_csharp.bat b/grpc/tools/run_tests/helper_scripts/build_csharp.bat
new file mode 100644
index 00000000..7721d80f
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/build_csharp.bat
@@ -0,0 +1,27 @@
+@rem Copyright 2016 gRPC authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+setlocal
+
+cd /d %~dp0\..\..\..\src\csharp
+
+dotnet build --configuration %MSBUILD_CONFIG% Grpc.sln || goto :error
+
+endlocal
+
+goto :EOF
+
+:error
+echo Failed!
+exit /b %errorlevel%
diff --git a/grpc/tools/run_tests/helper_scripts/build_csharp.sh b/grpc/tools/run_tests/helper_scripts/build_csharp.sh
new file mode 100755
index 00000000..c6bee82b
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/build_csharp.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../../src/csharp"
+
+if [ "$CONFIG" == "gcov" ]
+then
+ # overriding NativeDependenciesConfigurationUnix makes the C# project pick up the gcov flavor of grpc_csharp_ext
+ dotnet build --configuration "$MSBUILD_CONFIG" /p:NativeDependenciesConfigurationUnix=gcov Grpc.sln
+else
+ dotnet build --configuration "$MSBUILD_CONFIG" Grpc.sln
+fi
diff --git a/grpc/tools/run_tests/helper_scripts/build_php.sh b/grpc/tools/run_tests/helper_scripts/build_php.sh
new file mode 100755
index 00000000..4add0366
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/build_php.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+CONFIG=${CONFIG:-opt}
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+root=$(pwd)
+export GRPC_LIB_SUBDIR=libs/$CONFIG
+export CFLAGS="-Wno-parentheses-equality"
+
+# build php
+cd src/php
+
+cd ext/grpc
+phpize
+if [ "$CONFIG" != "gcov" ] ; then
+ ./configure --enable-grpc="$root" --enable-tests
+else
+ ./configure --enable-grpc="$root" --enable-coverage --enable-tests
+fi
+make
diff --git a/grpc/tools/run_tests/helper_scripts/build_python.sh b/grpc/tools/run_tests/helper_scripts/build_python.sh
new file mode 100755
index 00000000..e79a8896
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/build_python.sh
@@ -0,0 +1,231 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+##########################
+# Portability operations #
+##########################
+
+PLATFORM=$(uname -s)
+
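+# Note: each is_*() helper below is only ever invoked through a command
+# substitution, so the `exit 1` in its else branch merely terminates the
+# subshell (producing empty output) rather than aborting this script.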
+function is_msys() {
+ if [ "${PLATFORM/MSYS}" != "$PLATFORM" ]; then
+ echo true
+ else
+ exit 1
+ fi
+}
+
+function is_mingw() {
+ if [ "${PLATFORM/MINGW}" != "$PLATFORM" ]; then
+ echo true
+ else
+ exit 1
+ fi
+}
+
+function is_darwin() {
+ if [ "${PLATFORM/Darwin}" != "$PLATFORM" ]; then
+ echo true
+ else
+ exit 1
+ fi
+}
+
+function is_linux() {
+ if [ "${PLATFORM/Linux}" != "$PLATFORM" ]; then
+ echo true
+ else
+ exit 1
+ fi
+}
+
+function inside_venv() {
+ if [[ -n "${VIRTUAL_ENV}" ]]; then
+ echo true
+ fi
+}
+
+# Associated virtual environment name for the given python command.
+function venv() {
+ $1 -c "import sys; print('py{}{}'.format(*sys.version_info[:2]))"
+}
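+# For example, `venv python3.6` prints "py36" and `venv python2.7` prints
+# "py27".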
+
+# Path to python executable within a virtual environment depending on the
+# system.
+function venv_relative_python() {
+ if [ "$(is_mingw)" ]; then
+ echo 'Scripts/python.exe'
+ else
+ echo 'bin/python'
+ fi
+}
+
+# Distutils toolchain to use depending on the system.
+function toolchain() {
+ if [ "$(is_mingw)" ]; then
+ echo 'mingw32'
+ else
+ echo 'unix'
+ fi
+}
+
+# TODO(jtattermusch): this adds a dependency on grealpath on macOS
+# (brew install coreutils) for little reason.
+# Command to invoke the Linux `realpath` command or an equivalent.
+function script_realpath() {
+ # Find `realpath`
+ if [ -x "$(command -v realpath)" ]; then
+ realpath "$@"
+ elif [ -x "$(command -v grealpath)" ]; then
+ grealpath "$@"
+ else
+ exit 1
+ fi
+}
+
+####################
+# Script Arguments #
+####################
+
+PYTHON=${1:-python2.7}
+VENV=${2:-$(venv "$PYTHON")}
+VENV_RELATIVE_PYTHON=${3:-$(venv_relative_python)}
+TOOLCHAIN=${4:-$(toolchain)}
+
+if [ "$(is_msys)" ]; then
+ echo "MSYS doesn't directly provide the right compiler(s);"
+ echo "switch to a MinGW shell."
+ exit 1
+fi
+
+ROOT=$(pwd)
+export CFLAGS="-I$ROOT/include -std=gnu99 -fno-wrapv $CFLAGS"
+export GRPC_PYTHON_BUILD_WITH_CYTHON=1
+export LANG=en_US.UTF-8
+
+# Allow build_ext to build C/C++ files in parallel
+# by enabling a monkeypatch. It speeds up the build a lot.
+DEFAULT_PARALLEL_JOBS=$(nproc) || DEFAULT_PARALLEL_JOBS=4
+export GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS=${GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS:-$DEFAULT_PARALLEL_JOBS}
+
+# If ccache is available on Linux, use it.
+if [ "$(is_linux)" ]; then
+ # We're not on Darwin (Mac OS X)
+ if [ -x "$(command -v ccache)" ]; then
+ if [ -x "$(command -v gcc)" ]; then
+ export CC='ccache gcc'
+ elif [ -x "$(command -v clang)" ]; then
+ export CC='ccache clang'
+ fi
+ fi
+fi
+
+############################
+# Perform build operations #
+############################
+
+if [[ "$(inside_venv)" ]]; then
+ VENV_PYTHON="$PYTHON"
+else
+ # Instantiate the virtualenv from the Python version passed in.
+ $PYTHON -m pip install --user virtualenv==16.7.9
+ $PYTHON -m virtualenv "$VENV"
+ VENV_PYTHON=$(script_realpath "$VENV/$VENV_RELATIVE_PYTHON")
+fi
+
+# See https://github.com/grpc/grpc/issues/14815 for more context. We cannot rely
+# on pip to upgrade itself because if pip is too old, it may not have the required
+# TLS version to run `pip install`.
+curl https://bootstrap.pypa.io/get-pip.py | $VENV_PYTHON
+
+# pip-installs the directory specified. Used because on MSYS the vanilla Windows
+# Python gets confused when parsing paths.
+pip_install_dir() {
+ local saved_dir
+ saved_dir=$(pwd)
+ cd "$1"
+ ($VENV_PYTHON setup.py build_ext -c "$TOOLCHAIN" || true)
+ $VENV_PYTHON -m pip install --no-deps .
+ # Don't stash the old directory in PWD: bash rewrites PWD on every cd,
+ # so 'cd "$PWD"' would be a no-op instead of restoring the original dir.
+ cd "$saved_dir"
+}
+
+# On library/version/platform combos that do not have a binary
+# published, we may end up building a dependency from source. In that
+# case, several of our build environment variables may disrupt the
+# third-party build process. This function pipes through only the
+# minimal environment necessary.
+pip_install() {
+ /usr/bin/env -i PATH="$PATH" "$VENV_PYTHON" -m pip install "$@"
+}
+
+case "$VENV" in
+ *py36_gevent*)
+ # TODO(https://github.com/grpc/grpc/issues/15411) unpin this
+ pip_install gevent==1.3.b1
+ ;;
+ *gevent*)
+ pip_install -U gevent
+ ;;
+esac
+
+pip_install --upgrade pip==19.3.1
+pip_install --upgrade setuptools
+pip_install --upgrade cython
+pip_install --upgrade six enum34 protobuf
+
+if [ "$("$VENV_PYTHON" -c "import sys; print(sys.version_info[0])")" == "2" ]
+then
+ pip_install futures
+fi
+
+pip_install_dir "$ROOT"
+
+$VENV_PYTHON "$ROOT/tools/distrib/python/make_grpcio_tools.py"
+pip_install_dir "$ROOT/tools/distrib/python/grpcio_tools"
+
+# Build/install Channelz
+$VENV_PYTHON "$ROOT/src/python/grpcio_channelz/setup.py" preprocess
+$VENV_PYTHON "$ROOT/src/python/grpcio_channelz/setup.py" build_package_protos
+pip_install_dir "$ROOT/src/python/grpcio_channelz"
+
+# Build/install health checking
+$VENV_PYTHON "$ROOT/src/python/grpcio_health_checking/setup.py" preprocess
+$VENV_PYTHON "$ROOT/src/python/grpcio_health_checking/setup.py" build_package_protos
+pip_install_dir "$ROOT/src/python/grpcio_health_checking"
+
+# Build/install reflection
+$VENV_PYTHON "$ROOT/src/python/grpcio_reflection/setup.py" preprocess
+$VENV_PYTHON "$ROOT/src/python/grpcio_reflection/setup.py" build_package_protos
+pip_install_dir "$ROOT/src/python/grpcio_reflection"
+
+# Build/install status proto mapping
+$VENV_PYTHON "$ROOT/src/python/grpcio_status/setup.py" preprocess
+$VENV_PYTHON "$ROOT/src/python/grpcio_status/setup.py" build_package_protos
+pip_install_dir "$ROOT/src/python/grpcio_status"
+
+# Install testing
+pip_install_dir "$ROOT/src/python/grpcio_testing"
+
+# Build/install tests
+pip_install coverage==4.4 oauth2client==4.1.0 \
+ google-auth==1.0.0 requests==2.14.2 \
+ googleapis-common-protos==1.5.5
+$VENV_PYTHON "$ROOT/src/python/grpcio_tests/setup.py" preprocess
+$VENV_PYTHON "$ROOT/src/python/grpcio_tests/setup.py" build_package_protos
+pip_install_dir "$ROOT/src/python/grpcio_tests"
diff --git a/grpc/tools/run_tests/helper_scripts/build_python_msys2.sh b/grpc/tools/run_tests/helper_scripts/build_python_msys2.sh
new file mode 100644
index 00000000..f388b4bf
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/build_python_msys2.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+BUILD_PYTHON=$(realpath "$(dirname "$0")/build_python.sh")
+export MSYSTEM=$1
+shift 1
+bash --login "$BUILD_PYTHON" "$@"
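+
+# Example (hypothetical): build under the 64-bit MinGW toolchain:
+#   tools/run_tests/helper_scripts/build_python_msys2.sh MINGW64 python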
diff --git a/grpc/tools/run_tests/helper_scripts/build_ruby.sh b/grpc/tools/run_tests/helper_scripts/build_ruby.sh
new file mode 100755
index 00000000..38f99d80
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/build_ruby.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+export GRPC_CONFIG=${CONFIG:-opt}
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+rm -rf ./tmp
+rake compile
+
+# build grpc_ruby_plugin
+make grpc_ruby_plugin -j8
diff --git a/grpc/tools/run_tests/helper_scripts/bundle_install_wrapper.sh b/grpc/tools/run_tests/helper_scripts/bundle_install_wrapper.sh
new file mode 100755
index 00000000..ab31dd5c
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/bundle_install_wrapper.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+SYSTEM=$(uname | cut -f 1 -d_)
+
+if [ "$SYSTEM" == "Darwin" ] ; then
+ # Workaround for crash during bundle install
+ # See suggestion in https://github.com/bundler/bundler/issues/3692
+ BUNDLE_SPECIFIC_PLATFORM=true bundle install
+else
+ bundle install
+fi
+
diff --git a/grpc/tools/run_tests/helper_scripts/post_tests_c.sh b/grpc/tools/run_tests/helper_scripts/post_tests_c.sh
new file mode 100755
index 00000000..e4ab2035
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/post_tests_c.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+if [ "$CONFIG" != "gcov" ] ; then exit ; fi
+
+root=$(readlink -f "$(dirname "$0")/../../..")
+out=$root/reports/c_cxx_coverage
+tmp1=$(mktemp)
+tmp2=$(mktemp)
+cd "$root"
+lcov --capture --directory . --output-file "$tmp1"
+lcov --extract "$tmp1" "$root/src/*" "$root/include/*" --output-file "$tmp2"
+genhtml "$tmp2" --output-directory "$out"
+rm "$tmp2"
+rm "$tmp1"
+
diff --git a/grpc/tools/run_tests/helper_scripts/post_tests_csharp.bat b/grpc/tools/run_tests/helper_scripts/post_tests_csharp.bat
new file mode 100644
index 00000000..66155e8f
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/post_tests_csharp.bat
@@ -0,0 +1,39 @@
+@rem Copyright 2016 gRPC authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Post-test step for C#: generates the code coverage report (only when CONFIG is gcov).
+
+setlocal
+
+if not "%CONFIG%" == "gcov" (
+ goto :EOF
+)
+
+@rem enter src/csharp directory
+cd /d %~dp0\..\..\..\src\csharp
+
+@rem Generate code coverage report
+@rem TODO(jtattermusch): currently the report list is hardcoded
+packages\ReportGenerator.2.4.4.0\tools\ReportGenerator.exe -reports:"coverage_csharp_*.xml" -targetdir:"..\..\reports\csharp_coverage" -reporttypes:"Html;TextSummary" || goto :error
+
+@rem Generate the index.html file
+echo ^<html^>^<head^>^</head^>^<body^>^<a href='csharp_coverage/index.htm'^>csharp coverage^</a^>^<br/^>^</body^>^</html^> >..\..\reports\index.html
+
+endlocal
+
+goto :EOF
+
+:error
+echo Failed!
+exit /b %errorlevel%
diff --git a/grpc/tools/run_tests/helper_scripts/post_tests_csharp.sh b/grpc/tools/run_tests/helper_scripts/post_tests_csharp.sh
new file mode 100755
index 00000000..6473dfd0
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/post_tests_csharp.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+if [ "$CONFIG" != "gcov" ] ; then exit ; fi
+
+# change to gRPC repo root
+cd "$(dirname "$0")/../../.."
+
+# Generate the csharp extension coverage report
+gcov objs/gcov/src/csharp/ext/*.o
+lcov --base-directory . --directory . -c -o coverage.info
+lcov -e coverage.info '**/src/csharp/ext/*' -o coverage.info
+genhtml -o reports/csharp_ext_coverage --num-spaces 2 \
+ -t 'gRPC C# native extension test coverage' coverage.info \
+ --rc genhtml_hi_limit=95 --rc genhtml_med_limit=80 --no-prefix
diff --git a/grpc/tools/run_tests/helper_scripts/post_tests_php.sh b/grpc/tools/run_tests/helper_scripts/post_tests_php.sh
new file mode 100755
index 00000000..b23e4bd1
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/post_tests_php.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+if [ "$CONFIG" != "gcov" ] ; then exit ; fi
+
+root=$(readlink -f "$(dirname "$0")/../../..")
+out=$root/reports/php_ext_coverage
+tmp1=$(mktemp)
+tmp2=$(mktemp)
+cd "$root"
+lcov --capture --directory . --output-file "$tmp1"
+lcov --extract "$tmp1" "$root/src/php/ext/grpc/*" --output-file "$tmp2"
+genhtml "$tmp2" --output-directory "$out"
+rm "$tmp2"
+rm "$tmp1"
+
+# TODO(mattkwong): generate coverage report for php and copy to reports/php
diff --git a/grpc/tools/run_tests/helper_scripts/post_tests_python.sh b/grpc/tools/run_tests/helper_scripts/post_tests_python.sh
new file mode 100755
index 00000000..bca9b20e
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/post_tests_python.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+if [ "$CONFIG" != "gcov" ] ; then exit ; fi
+
+# change to directory of Python coverage files
+cd "$(dirname "$0")/../../../src/python/grpcio_tests/"
+
+coverage combine .
+coverage html -i -d ./../../../reports/python
diff --git a/grpc/tools/run_tests/helper_scripts/post_tests_ruby.sh b/grpc/tools/run_tests/helper_scripts/post_tests_ruby.sh
new file mode 100755
index 00000000..f0860015
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/post_tests_ruby.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+if [ "$CONFIG" != "gcov" ] ; then exit ; fi
+
+root=$(readlink -f "$(dirname "$0")/../../..")
+out=$root/reports/ruby_ext_coverage
+tmp1=$(mktemp)
+tmp2=$(mktemp)
+cd "$root"
+lcov --capture --directory . --output-file "$tmp1"
+lcov --extract "$tmp1" "$root/src/ruby/*" --output-file "$tmp2"
+genhtml "$tmp2" --output-directory "$out"
+rm "$tmp2"
+rm "$tmp1"
+
+cp -rv "$root/coverage" "$root/reports/ruby"
diff --git a/grpc/tools/run_tests/helper_scripts/pre_build_cmake.bat b/grpc/tools/run_tests/helper_scripts/pre_build_cmake.bat
new file mode 100644
index 00000000..f0777760
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/pre_build_cmake.bat
@@ -0,0 +1,35 @@
+@rem Copyright 2017 gRPC authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+setlocal
+
+set GENERATOR=%1
+set ARCHITECTURE=%2
+
+cd /d %~dp0\..\..\..
+
+mkdir cmake
+cd cmake
+mkdir build
+cd build
+
+cmake -G %GENERATOR% -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=ON ../.. || goto :error
+
+endlocal
+
+goto :EOF
+
+:error
+echo Failed!
+exit /b %errorlevel%
diff --git a/grpc/tools/run_tests/helper_scripts/pre_build_cmake.sh b/grpc/tools/run_tests/helper_scripts/pre_build_cmake.sh
new file mode 100755
index 00000000..bb36588b
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/pre_build_cmake.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+mkdir -p cmake/build
+cd cmake/build
+
+# MSBUILD_CONFIG's values are suitable for cmake as well
+cmake -DgRPC_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" ../..
diff --git a/grpc/tools/run_tests/helper_scripts/pre_build_csharp.bat b/grpc/tools/run_tests/helper_scripts/pre_build_csharp.bat
new file mode 100644
index 00000000..05c6cf0f
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/pre_build_csharp.bat
@@ -0,0 +1,43 @@
+@rem Copyright 2016 gRPC authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Performs nuget restore step for C#.
+
+setlocal
+
+set ARCHITECTURE=%1
+
+@rem enter repo root
+cd /d %~dp0\..\..\..
+
+mkdir cmake
+cd cmake
+mkdir build
+cd build
+mkdir %ARCHITECTURE%
+cd %ARCHITECTURE%
+
+cmake -G "Visual Studio 14 2015" -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=OFF -DgRPC_MSVC_STATIC_RUNTIME=ON ../../.. || goto :error
+
+cd ..\..\..\src\csharp
+
+dotnet restore Grpc.sln || goto :error
+
+endlocal
+
+goto :EOF
+
+:error
+echo Failed!
+exit /b %errorlevel%
diff --git a/grpc/tools/run_tests/helper_scripts/pre_build_csharp.sh b/grpc/tools/run_tests/helper_scripts/pre_build_csharp.sh
new file mode 100755
index 00000000..7d83986f
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/pre_build_csharp.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# cd to repository root
+cd "$(dirname "$0")/../../.."
+
+mkdir -p cmake/build
+cd cmake/build
+
+cmake -DgRPC_BUILD_TESTS=OFF -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" ../..
+
+cd ../../src/csharp
+
+dotnet restore Grpc.sln
diff --git a/grpc/tools/run_tests/helper_scripts/pre_build_ruby.sh b/grpc/tools/run_tests/helper_scripts/pre_build_ruby.sh
new file mode 100755
index 00000000..b5740963
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/pre_build_ruby.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+export GRPC_CONFIG=${CONFIG:-opt}
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+tools/run_tests/helper_scripts/bundle_install_wrapper.sh
diff --git a/grpc/tools/run_tests/helper_scripts/prep_xds.sh b/grpc/tools/run_tests/helper_scripts/prep_xds.sh
new file mode 100755
index 00000000..ab15e2eb
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/prep_xds.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+sudo apt-get install -y python3-pip
+sudo python3 -m pip install --upgrade pip
+sudo python3 -m pip install grpcio grpcio-tools google-api-python-client google-auth-httplib2 oauth2client
+
+# Prepare generated Python code.
+TOOLS_DIR=tools/run_tests
+PROTO_SOURCE_DIR=src/proto/grpc/testing
+PROTO_DEST_DIR=${TOOLS_DIR}/${PROTO_SOURCE_DIR}
+mkdir -p ${PROTO_DEST_DIR}
+
+python3 -m grpc_tools.protoc \
+ --proto_path=. \
+ --python_out=${TOOLS_DIR} \
+ --grpc_python_out=${TOOLS_DIR} \
+ ${PROTO_SOURCE_DIR}/test.proto \
+ ${PROTO_SOURCE_DIR}/messages.proto \
+ ${PROTO_SOURCE_DIR}/empty.proto
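
For context, here is a sketch of how the stubs generated above could be imported, assuming `prep_xds.sh` has already run from the repo root; the generated module layout mirrors the proto paths under `tools/run_tests`, and the `SimpleRequest` usage is only an illustration:

```python
# Hypothetical consumer of the modules generated by prep_xds.sh.
import sys

# --python_out/--grpc_python_out placed the generated modules here.
sys.path.insert(0, 'tools/run_tests')

from src.proto.grpc.testing import messages_pb2   # from messages.proto
from src.proto.grpc.testing import test_pb2_grpc  # from test.proto

# Build a request message the way an xDS interop client might.
request = messages_pb2.SimpleRequest(response_size=1024)
print(request)
```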
diff --git a/grpc/tools/run_tests/helper_scripts/run_grpc-node.sh b/grpc/tools/run_tests/helper_scripts/run_grpc-node.sh
new file mode 100755
index 00000000..f54d69e9
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/run_grpc-node.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This script runs grpc/grpc-node tests with their grpc submodule updated
+# to this reference
+
+set -ex
+
+# cd to gRPC root directory
+cd "$(dirname "$0")/../../.."
+
+CURRENT_COMMIT="$(git rev-parse --verify HEAD)"
+
+rm -rf ./../grpc-node
+git clone --recursive https://github.com/grpc/grpc-node ./../grpc-node
+cd ./../grpc-node
+
+echo "TODO(jtattermusch): Skipping grpc-node's ./test-grpc-submodule.sh $CURRENT_COMMIT"
+echo "because it currently doesn't provide any useful signal."
+echo "See b/152833238"
+#./test-grpc-submodule.sh "$CURRENT_COMMIT"
diff --git a/grpc/tools/run_tests/helper_scripts/run_python.sh b/grpc/tools/run_tests/helper_scripts/run_python.sh
new file mode 100755
index 00000000..2b7321e5
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/run_python.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+PYTHON=$(realpath "${1:-py27/bin/python}")
+
+ROOT=$(pwd)
+
+$PYTHON "$ROOT/src/python/grpcio_tests/setup.py" "$2"
+
+mkdir -p "$ROOT/reports"
+rm -rf "$ROOT/reports/python-coverage"
+(mv -T "$ROOT/htmlcov" "$ROOT/reports/python-coverage") || true
+
diff --git a/grpc/tools/run_tests/helper_scripts/run_ruby.sh b/grpc/tools/run_tests/helper_scripts/run_ruby.sh
new file mode 100755
index 00000000..4e9c2128
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/run_ruby.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+rake
diff --git a/grpc/tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh b/grpc/tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh
new file mode 100755
index 00000000..fc0759fc
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# change to grpc repo root
+cd "$(dirname "$0")/../../.."
+
+EXIT_CODE=0
+time ruby src/ruby/end2end/sig_handling_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/channel_state_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/channel_closing_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/sig_int_during_channel_watch_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/killed_client_thread_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/forking_client_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/grpc_class_init_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/multiple_killed_watching_threads_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/load_grpc_with_gc_stress_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/client_memory_usage_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/package_with_underscore_checker.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/graceful_sig_handling_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/graceful_sig_stop_driver.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/errors_load_before_grpc_lib.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/logger_load_before_grpc_lib.rb || EXIT_CODE=1
+time ruby src/ruby/end2end/status_codes_load_before_grpc_lib.rb || EXIT_CODE=1
+exit $EXIT_CODE
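
Note the script deliberately lets every driver run even after a failure, recording it in `EXIT_CODE` and failing only at the end. A rough Python rendering of the same run-everything, fail-at-the-end pattern (the two drivers listed are just examples from the set above):

```python
# Run every test, remember any failure, and fail once at the end.
import subprocess
import sys

tests = [
    ['ruby', 'src/ruby/end2end/sig_handling_driver.rb'],
    ['ruby', 'src/ruby/end2end/channel_state_driver.rb'],
]

exit_code = 0
for cmd in tests:
    if subprocess.call(cmd) != 0:
        exit_code = 1  # keep going so every driver gets a chance to run

sys.exit(exit_code)
```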
diff --git a/grpc/tools/run_tests/helper_scripts/run_tests_in_workspace.sh b/grpc/tools/run_tests/helper_scripts/run_tests_in_workspace.sh
new file mode 100755
index 00000000..fa7a7aac
--- /dev/null
+++ b/grpc/tools/run_tests/helper_scripts/run_tests_in_workspace.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Create a workspace in a subdirectory to allow running multiple builds in isolation.
+# The WORKSPACE_NAME env variable needs to contain the name of the workspace to create.
+# All cmdline args will be passed to the run_tests.py script (executed in the
+# newly created workspace).
+set -ex
+
+cd "$(dirname "$0")/../../.."
+repo_root="$(pwd)"
+export repo_root
+
+rm -rf "${WORKSPACE_NAME}"
+git clone . "${WORKSPACE_NAME}"
+# clone gRPC submodules, use data from locally cloned submodules where possible
+# shellcheck disable=SC2016,SC1004
+git submodule foreach 'cd "${repo_root}/${WORKSPACE_NAME}" \
+ && git submodule update --init --reference ${repo_root}/${name} ${name}'
+
+echo "Running run_tests.py in workspace ${WORKSPACE_NAME}"
+python "${WORKSPACE_NAME}/tools/run_tests/run_tests.py" "$@"
diff --git a/grpc/tools/run_tests/interop/interop_html_report.template b/grpc/tools/run_tests/interop/interop_html_report.template
new file mode 100644
index 00000000..6d9de5c6
--- /dev/null
+++ b/grpc/tools/run_tests/interop/interop_html_report.template
@@ -0,0 +1,201 @@
+<!DOCTYPE html>
+<html lang="en">
+<head><title>Interop Test Result</title></head>
+<body>
+
+<%def name="fill_one_test_result(shortname, resultset)">
+ % if shortname in resultset:
+  ## Because interop tests do not have a runs_per_test flag, each test is
+  ## run once, so there should be only one element for each result.
+ <% result = resultset[shortname][0] %>
+ % if result.state == 'PASSED':
+ <td bgcolor="green">PASS</td>
+ % else:
+ <%
+ tooltip = ''
+ if result.returncode > 0 or result.message:
+ if result.returncode > 0:
+ tooltip = 'returncode: %d ' % result.returncode
+ if result.message:
+ tooltip = '%smessage: %s' % (tooltip, result.message)
+ %>
+ % if result.state == 'FAILED':
+ <td bgcolor="red">
+ % if tooltip:
+ <a href="#" data-toggle="tooltip" data-placement="auto" title="${tooltip | h}">FAIL</a></td>
+ % else:
+ FAIL</td>
+ % endif
+ % elif result.state == 'TIMEOUT':
+ <td bgcolor="yellow">
+ % if tooltip:
+ <a href="#" data-toggle="tooltip" data-placement="auto" title="${tooltip | h}">TIMEOUT</a></td>
+ % else:
+ TIMEOUT</td>
+ % endif
+ % endif
+ % endif
+ % else:
+ <td bgcolor="magenta">Not implemented</td>
+ % endif
+</%def>
+
+<%def name="fill_one_http2_test_result(shortname, resultset)">
+ ## keep this mostly in sync with the template above
+ % if shortname in resultset:
+  ## Because interop tests do not have a runs_per_test flag, each test is
+  ## run once, so there should be only one element for each result.
+ <% result = resultset[shortname][0] %>
+ % if result.http2results:
+ <td bgcolor="white">
+ <div style="width:95%; border: 1px solid black; position: relative; padding: 3px;">
+ <span style="position: absolute; left: 45%;">${int(result.http2results['percent'] * 100)}&#37;</span>
+ <div style="height: 20px;
+ background-color: hsl(${result.http2results['percent'] * 120}, 100%, 50%);
+ width: ${result.http2results['percent'] * 100}%;"
+ title="${result.http2results['failed_cases'] | h}"></div>
+ </div>
+ </td>
+ % else:
+      <td bgcolor="red">No result found!</td>
+ % endif
+ % else:
+ <td bgcolor="magenta">Not implemented</td>
+ % endif
+</%def>
+
+<%def name="display_cloud_to_prod_result(prod_server)">
+ ## Each column header is the client language.
+ <table style="width:100%" border="1">
+ <tr bgcolor="#00BFFF">
+ <th>Client languages &#9658;<br/>Test Cases &#9660;</th>
+ % for client_lang in client_langs:
+ <th>${client_lang}</th>
+ % endfor
+ </tr>
+ % for test_case in test_cases + auth_test_cases:
+ <tr><td><b>${test_case}</b></td>
+ % for client_lang in client_langs:
+ <%
+ if test_case in auth_test_cases:
+ shortname = 'cloud_to_prod_auth:%s:%s:%s' % (
+ prod_server, client_lang, test_case)
+ else:
+ shortname = 'cloud_to_prod:%s:%s:%s' % (
+ prod_server, client_lang, test_case)
+ %>
+ ${fill_one_test_result(shortname, resultset)}
+ % endfor
+ </tr>
+ % endfor
+ </table>
+</%def>
+
+% if num_failures > 1:
+ <p><h2><font color="red">${num_failures} tests failed!</font></h2></p>
+% elif num_failures:
+ <p><h2><font color="red">${num_failures} test failed!</font></h2></p>
+% else:
+ <p><h2><font color="green">All tests passed!</font></h2></p>
+% endif
+
+% if cloud_to_prod:
+ % for prod_server in prod_servers:
+ <h2>Cloud to ${prod_server}</h2>
+ ${display_cloud_to_prod_result(prod_server)}
+ % endfor
+% endif
+
+% if http2_server_cases:
+ <h2>HTTP/2 Server Tests</h2>
+ ## Each column header is the client language.
+ <table style="width:100%" border="1">
+ <tr bgcolor="#00BFFF">
+ <th>Client languages &#9658;<br/>Test Cases &#9660;</th>
+ % for client_lang in client_langs:
+ <th>${client_lang}</th>
+ % endfor
+ </tr>
+ % for test_case in http2_server_cases:
+ <tr><td><b>${test_case}</b></td>
+ % for client_lang in client_langs:
+ <%
+ shortname = 'cloud_to_cloud:%s:http2_server:%s' % (client_lang,
+ test_case)
+ %>
+ ${fill_one_test_result(shortname, resultset)}
+ % endfor
+ </tr>
+ % endfor
+ </table>
+% endif
+
+% if http2_interop:
+ ## Each column header is the server language.
+ <h2>HTTP/2 Interop</h2>
+ <table style="width:100%" border="1">
+ <tr bgcolor="#00BFFF">
+ <th>Servers &#9658;<br/>Test Cases &#9660;</th>
+ % for server_lang in server_langs:
+ <th>${server_lang}</th>
+ % endfor
+ % if cloud_to_prod:
+ % for prod_server in prod_servers:
+ <th>${prod_server}</th>
+ % endfor
+ % endif
+ </tr>
+ % for test_case in http2_cases:
+ <tr><td><b>${test_case}</b></td>
+ ## Fill up the cells with test result.
+ % for server_lang in server_langs:
+ <%
+ shortname = 'cloud_to_cloud:http2:%s_server:%s' % (
+ server_lang, test_case)
+ %>
+ ${fill_one_http2_test_result(shortname, resultset)}
+ % endfor
+ % if cloud_to_prod:
+ % for prod_server in prod_servers:
+ <% shortname = 'cloud_to_prod:%s:http2:%s' % (prod_server, test_case) %>
+ ${fill_one_http2_test_result(shortname, resultset)}
+ % endfor
+ % endif
+ </tr>
+ % endfor
+ </table>
+% endif
+
+% if server_langs:
+ % for test_case in test_cases:
+ ## Each column header is the client language.
+ <h2>${test_case}</h2>
+ <table style="width:100%" border="1">
+ <tr bgcolor="#00BFFF">
+ <th>Client languages &#9658;<br/>Server languages &#9660;</th>
+ % for client_lang in client_langs:
+ <th>${client_lang}</th>
+ % endfor
+ </tr>
+ ## Each row head is the server language.
+ % for server_lang in server_langs:
+ <tr>
+ <td><b>${server_lang}</b></td>
+ % for client_lang in client_langs:
+ <%
+ shortname = 'cloud_to_cloud:%s:%s_server:%s' % (
+ client_lang, server_lang, test_case)
+ %>
+ ${fill_one_test_result(shortname, resultset)}
+ % endfor
+ </tr>
+ % endfor
+ </table>
+ % endfor
+% endif
+
+<script>
+ $(document).ready(function(){$('[data-toggle="tooltip"]').tooltip();});
+</script>
+</body>
+</html>
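
One detail worth noting in `fill_one_http2_test_result`: the pass rate drives both the bar width and an HSL hue, so 0% renders red, ~50% yellow, and 100% green. A small Python sketch of that color math (the function name is ours, not the template's):

```python
# Color math used by the HTTP/2 result cells: hue 0 (red) .. 120 (green).
def http2_cell_style(percent):
    """percent is a float in [0.0, 1.0], as in result.http2results['percent']."""
    hue = percent * 120    # 0.0 -> red, 0.5 -> yellow, 1.0 -> green
    width = percent * 100  # bar width, as a percentage of the cell
    return 'background-color: hsl(%d, 100%%, 50%%); width: %d%%;' % (hue, width)

print(http2_cell_style(0.75))  # yellow-green bar covering 75% of the cell
```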
diff --git a/grpc/tools/run_tests/interop/with_nvm.sh b/grpc/tools/run_tests/interop/with_nvm.sh
new file mode 100755
index 00000000..55f4b2b8
--- /dev/null
+++ b/grpc/tools/run_tests/interop/with_nvm.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Makes sure NVM is loaded before executing the command passed as an argument
+# shellcheck disable=SC1090
+source ~/.nvm/nvm.sh
+"$@"
diff --git a/grpc/tools/run_tests/interop/with_rvm.sh b/grpc/tools/run_tests/interop/with_rvm.sh
new file mode 100755
index 00000000..82bce9da
--- /dev/null
+++ b/grpc/tools/run_tests/interop/with_rvm.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Makes sure RVM is loaded before executing the command passed as an argument
+# shellcheck disable=SC1091
+source /usr/local/rvm/scripts/rvm
+"$@"
diff --git a/grpc/tools/run_tests/lb_interop_tests/gen_build_yaml.py b/grpc/tools/run_tests/lb_interop_tests/gen_build_yaml.py
new file mode 100755
index 00000000..1a36e1e3
--- /dev/null
+++ b/grpc/tools/run_tests/lb_interop_tests/gen_build_yaml.py
@@ -0,0 +1,345 @@
+#!/usr/bin/env python2.7
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Generates the appropriate JSON data for LB interop test scenarios."""
+
+import json
+import os
+import yaml
+
+all_scenarios = []
+
+# TODO(https://github.com/grpc/grpc-go/issues/2347): enable
+# client_falls_back_because_no_backends_* scenarios for Java/Go.
+
+# TODO(https://github.com/grpc/grpc-java/issues/4887): enable
+# *short_stream* scenarios for Java.
+
+# TODO(https://github.com/grpc/grpc-java/issues/4912): enable
+# Java TLS tests involving TLS to the balancer.
+
+
+def server_sec(transport_sec):
+ if transport_sec == 'google_default_credentials':
+ return 'alts', 'alts', 'tls'
+ return transport_sec, transport_sec, transport_sec
+
+
+def generate_no_balancer_because_lb_a_record_returns_nx_domain():
+ all_configs = []
+ for transport_sec in [
+ 'insecure', 'alts', 'tls', 'google_default_credentials'
+ ]:
+ balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
+ config = {
+ 'name':
+ 'no_balancer_because_lb_a_record_returns_nx_domain_%s' %
+ transport_sec,
+ 'skip_langs': [],
+ 'transport_sec':
+ transport_sec,
+ 'balancer_configs': [],
+ 'backend_configs': [],
+ 'fallback_configs': [{
+ 'transport_sec': fallback_sec,
+ }],
+ 'cause_no_error_no_data_for_balancer_a_record':
+ False,
+ }
+ all_configs.append(config)
+ return all_configs
+
+
+all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()
+
+
+def generate_no_balancer_because_lb_a_record_returns_no_data():
+ all_configs = []
+ for transport_sec in [
+ 'insecure', 'alts', 'tls', 'google_default_credentials'
+ ]:
+ balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
+ config = {
+ 'name':
+ 'no_balancer_because_lb_a_record_returns_no_data_%s' %
+ transport_sec,
+ 'skip_langs': [],
+ 'transport_sec':
+ transport_sec,
+ 'balancer_configs': [],
+ 'backend_configs': [],
+ 'fallback_configs': [{
+ 'transport_sec': fallback_sec,
+ }],
+ 'cause_no_error_no_data_for_balancer_a_record':
+ True,
+ }
+ all_configs.append(config)
+ return all_configs
+
+
+all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()
+
+
+def generate_client_referred_to_backend():
+ all_configs = []
+ for balancer_short_stream in [True, False]:
+ for transport_sec in [
+ 'insecure', 'alts', 'tls', 'google_default_credentials'
+ ]:
+ balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
+ skip_langs = []
+ if transport_sec == 'tls':
+ skip_langs += ['java']
+ if balancer_short_stream:
+ skip_langs += ['java']
+ config = {
+ 'name':
+ 'client_referred_to_backend_%s_short_stream_%s' %
+ (transport_sec, balancer_short_stream),
+ 'skip_langs':
+ skip_langs,
+ 'transport_sec':
+ transport_sec,
+ 'balancer_configs': [{
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ }],
+ 'backend_configs': [{
+ 'transport_sec': backend_sec,
+ }],
+ 'fallback_configs': [],
+ 'cause_no_error_no_data_for_balancer_a_record':
+ False,
+ }
+ all_configs.append(config)
+ return all_configs
+
+
+all_scenarios += generate_client_referred_to_backend()
+
+
+def generate_client_referred_to_backend_fallback_broken():
+ all_configs = []
+ for balancer_short_stream in [True, False]:
+ for transport_sec in ['alts', 'tls', 'google_default_credentials']:
+ balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
+ skip_langs = []
+ if transport_sec == 'tls':
+ skip_langs += ['java']
+ if balancer_short_stream:
+ skip_langs += ['java']
+ config = {
+ 'name':
+ 'client_referred_to_backend_fallback_broken_%s_short_stream_%s'
+ % (transport_sec, balancer_short_stream),
+ 'skip_langs':
+ skip_langs,
+ 'transport_sec':
+ transport_sec,
+ 'balancer_configs': [{
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ }],
+ 'backend_configs': [{
+ 'transport_sec': backend_sec,
+ }],
+ 'fallback_configs': [{
+ 'transport_sec': 'insecure',
+ }],
+ 'cause_no_error_no_data_for_balancer_a_record':
+ False,
+ }
+ all_configs.append(config)
+ return all_configs
+
+
+all_scenarios += generate_client_referred_to_backend_fallback_broken()
+
+
+def generate_client_referred_to_backend_multiple_backends():
+ all_configs = []
+ for balancer_short_stream in [True, False]:
+ for transport_sec in [
+ 'insecure', 'alts', 'tls', 'google_default_credentials'
+ ]:
+ balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
+ skip_langs = []
+ if transport_sec == 'tls':
+ skip_langs += ['java']
+ if balancer_short_stream:
+ skip_langs += ['java']
+ config = {
+ 'name':
+ 'client_referred_to_backend_multiple_backends_%s_short_stream_%s'
+ % (transport_sec, balancer_short_stream),
+ 'skip_langs':
+ skip_langs,
+ 'transport_sec':
+ transport_sec,
+ 'balancer_configs': [{
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ }],
+ 'backend_configs': [{
+ 'transport_sec': backend_sec,
+ }, {
+ 'transport_sec': backend_sec,
+ }, {
+ 'transport_sec': backend_sec,
+ }, {
+ 'transport_sec': backend_sec,
+ }, {
+ 'transport_sec': backend_sec,
+ }],
+ 'fallback_configs': [],
+ 'cause_no_error_no_data_for_balancer_a_record':
+ False,
+ }
+ all_configs.append(config)
+ return all_configs
+
+
+all_scenarios += generate_client_referred_to_backend_multiple_backends()
+
+
+def generate_client_falls_back_because_no_backends():
+ all_configs = []
+ for balancer_short_stream in [True, False]:
+ for transport_sec in [
+ 'insecure', 'alts', 'tls', 'google_default_credentials'
+ ]:
+ balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
+ skip_langs = ['go', 'java']
+ if transport_sec == 'tls':
+ skip_langs += ['java']
+ if balancer_short_stream:
+ skip_langs += ['java']
+ config = {
+ 'name':
+ 'client_falls_back_because_no_backends_%s_short_stream_%s' %
+ (transport_sec, balancer_short_stream),
+ 'skip_langs':
+ skip_langs,
+ 'transport_sec':
+ transport_sec,
+ 'balancer_configs': [{
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ }],
+ 'backend_configs': [],
+ 'fallback_configs': [{
+ 'transport_sec': fallback_sec,
+ }],
+ 'cause_no_error_no_data_for_balancer_a_record':
+ False,
+ }
+ all_configs.append(config)
+ return all_configs
+
+
+all_scenarios += generate_client_falls_back_because_no_backends()
+
+
+def generate_client_falls_back_because_balancer_connection_broken():
+ all_configs = []
+ for transport_sec in ['alts', 'tls', 'google_default_credentials']:
+ balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
+ skip_langs = []
+ if transport_sec == 'tls':
+ skip_langs = ['java']
+ config = {
+ 'name':
+ 'client_falls_back_because_balancer_connection_broken_%s' %
+ transport_sec,
+ 'skip_langs':
+ skip_langs,
+ 'transport_sec':
+ transport_sec,
+ 'balancer_configs': [{
+ 'transport_sec': 'insecure',
+ 'short_stream': False,
+ }],
+ 'backend_configs': [],
+ 'fallback_configs': [{
+ 'transport_sec': fallback_sec,
+ }],
+ 'cause_no_error_no_data_for_balancer_a_record':
+ False,
+ }
+ all_configs.append(config)
+ return all_configs
+
+
+all_scenarios += generate_client_falls_back_because_balancer_connection_broken()
+
+
+def generate_client_referred_to_backend_multiple_balancers():
+ all_configs = []
+ for balancer_short_stream in [True, False]:
+ for transport_sec in [
+ 'insecure', 'alts', 'tls', 'google_default_credentials'
+ ]:
+ balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
+ skip_langs = []
+ if transport_sec == 'tls':
+ skip_langs += ['java']
+ if balancer_short_stream:
+ skip_langs += ['java']
+ config = {
+ 'name':
+ 'client_referred_to_backend_multiple_balancers_%s_short_stream_%s'
+ % (transport_sec, balancer_short_stream),
+ 'skip_langs':
+ skip_langs,
+ 'transport_sec':
+ transport_sec,
+ 'balancer_configs': [
+ {
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ },
+ {
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ },
+ {
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ },
+ {
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ },
+ {
+ 'transport_sec': balancer_sec,
+ 'short_stream': balancer_short_stream,
+ },
+ ],
+ 'backend_configs': [{
+ 'transport_sec': backend_sec,
+ },],
+ 'fallback_configs': [],
+ 'cause_no_error_no_data_for_balancer_a_record':
+ False,
+ }
+ all_configs.append(config)
+ return all_configs
+
+
+all_scenarios += generate_client_referred_to_backend_multiple_balancers()
+
+print(yaml.dump({
+ 'lb_interop_test_scenarios': all_scenarios,
+}))
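
The script writes its scenarios to stdout as YAML. A short sketch of how a consumer might load that output and filter scenarios by `skip_langs` (the file name is an assumption; in the repo the output is consumed by the build-yaml machinery):

```python
# Load the emitted YAML and keep only scenarios runnable for one language.
import yaml

with open('lb_interop_scenarios.yaml') as f:  # hypothetical capture of stdout
    doc = yaml.safe_load(f)

lang = 'java'
runnable = [s for s in doc['lb_interop_test_scenarios']
            if lang not in s['skip_langs']]
print('%d of %d scenarios runnable for %s' %
      (len(runnable), len(doc['lb_interop_test_scenarios']), lang))
```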
diff --git a/grpc/tools/run_tests/performance/README.md b/grpc/tools/run_tests/performance/README.md
new file mode 100644
index 00000000..6b41e978
--- /dev/null
+++ b/grpc/tools/run_tests/performance/README.md
@@ -0,0 +1,134 @@
+# Overview of performance test suite, with steps for manual runs:
+
+For design of the tests, see
+https://grpc.io/docs/guides/benchmarking.html.
+
+## Pre-reqs for running these manually:
+In general, the benchmark workers and driver build scripts expect
+[linux_performance_worker_init.sh](../../gce/linux_performance_worker_init.sh) to have been run already.
+
+### To run benchmarks locally:
+* From the grpc repo root, start the
+[run_performance_tests.py](../run_performance_tests.py) runner script.
+
+### On remote machines, to start the driver and workers manually:
+The [run_performance_tests.py](../run_performance_tests.py) top-level runner script can also
+be used with remote machines, but for tasks such as profiling the server
+it can be useful to run the workers manually.
+
+1. You'll need a "driver" and separate "worker" machines.
+For example, you might use one GCE "driver" machine and 3 other
+GCE "worker" machines that are in the same zone.
+
+2. Connect to each worker machine and start up a benchmark worker with a "driver_port".
+ * For example, to start the grpc-go benchmark worker:
+ [grpc-go worker main.go](https://github.com/grpc/grpc-go/blob/master/benchmark/worker/main.go) --driver_port <driver_port>
+
+#### Commands to start workers in different languages:
+ * Note that these commands are what the top-level
+ [run_performance_tests.py](../run_performance_tests.py) script uses to
+ build and run different workers through the
+ [build_performance.sh](./build_performance.sh) script and "run worker"
+ scripts (such as the [run_worker_java.sh](./run_worker_java.sh)).
+
+##### Running benchmark workers for C-core wrapped languages (C++, Python, C#, Node, Ruby):
+ * These are simpler since they all live in the main grpc repo.
+
+```
+$ cd <grpc_repo_root>
+$ tools/run_tests/performance/build_performance.sh
+$ tools/run_tests/performance/run_worker_<language>.sh
+```
+
+ * Note that there is one "run_worker" script per language, e.g.,
+ [run_worker_csharp.sh](./run_worker_csharp.sh) for C#.
+
+##### Running benchmark workers for gRPC-Java:
+ * You'll need the [grpc-java](https://github.com/grpc/grpc-java) repo.
+
+```
+$ cd <grpc-java-repo>
+$ ./gradlew -PskipCodegen=true -PskipAndroid=true :grpc-benchmarks:installDist
+$ benchmarks/build/install/grpc-benchmarks/bin/benchmark_worker --driver_port <driver_port>
+```
+
+##### Running benchmark workers for gRPC-Go:
+ * You'll need the [grpc-go repo](https://github.com/grpc/grpc-go)
+
+```
+$ cd <grpc-go-repo>/benchmark/worker && go install
+$ # if profiling, it might be helpful to turn off inlining by building with "-gcflags=-l"
+$ $GOPATH/bin/worker --driver_port <driver_port>
+```
+
+#### Build the driver:
+* Connect to the driver machine (if using a remote driver) and from the grpc repo root:
+```
+$ tools/run_tests/performance/build_performance.sh
+```
+
+#### Run the driver:
+1. Get the 'scenario_json' relevant for the scenario to run. Note that "scenario
+ json" configs are generated from [scenario_config.py](./scenario_config.py).
+ The [driver](../../../test/cpp/qps/qps_json_driver.cc) takes a list of these configs as a json string of the form: `{scenario: <json_list_of_scenarios> }`
+ in its `--scenarios_json` command argument.
+ One quick way to get a valid json string to pass to the driver is by running
+   [run_performance_tests.py](../run_performance_tests.py) locally and copying the logged scenario json command arg.
+
+2. From the grpc repo root:
+
+* Set the `QPS_WORKERS` environment variable to a comma-separated list of worker
+machines. Note that the driver will start the "benchmark server" on the first
+entry in the list, and the rest will be told to run as clients against the
+benchmark server.
+
+Example of running the driver (e.g., when profiling a Go benchmark server):
+```
+$ export QPS_WORKERS=<host1>:10000,<host2>:10000,<host3>:10000
+$ bins/opt/qps_json_driver --scenario_json='<scenario_json_scenario_config_string>'
+```
+
+### Example profiling commands
+
+While running the benchmark, a profiler can be attached to the server.
+
+Example to count syscalls in grpc-go server during a benchmark:
+* Connect to server machine and run:
+```
+$ netstat -tulpn | grep <driver_port> # to get pid of worker
+$ perf stat -p <worker_pid> -e syscalls:sys_enter_write # stop after test complete
+```
+
+Example memory profile of the grpc-go server, with `go tool pprof`:
+* After a run is done on the server, see its alloc profile with:
+```
+$ go tool pprof --text --alloc_space http://localhost:<pprof_port>/debug/pprof/heap
+```
+
+### Configuration environment variables:
+
+* QPS_WORKER_CHANNEL_CONNECT_TIMEOUT
+
+ Consuming process: qps_worker
+
+ Type: integer (number of seconds)
+
+ This can be used to configure the amount of time that benchmark
+ clients wait for channels to the benchmark server to become ready.
+ This is useful in certain benchmark environments in which the
+ server can take a long time to become ready. Note: if setting
+ this to a high value, then the scenario config under test should
+ probably also have a large "warmup_seconds".
+
+* QPS_WORKERS
+
+ Consuming process: qps_json_driver
+
+  Type: comma-separated list of host:port
+
+  Set this to a comma-separated list of QPS worker processes/machines.
+  Each scenario in a scenario config specifies a certain number
+  of servers, `num_servers`, and the driver will start
+  "benchmark servers" on the first `num_servers` `host:port` pairs in
+  the comma-separated list. The rest will be told to run as clients
+  against the benchmark server.
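
To make the `QPS_WORKERS` assignment concrete, a tiny illustrative sketch (not part of the repo) of splitting the list into servers and clients:

```python
# First `num_servers` entries become benchmark servers, the rest clients.
import os

workers = os.environ.get('QPS_WORKERS',
                         'host1:10000,host2:10000,host3:10000').split(',')
num_servers = 1  # in practice this comes from the scenario config

servers, clients = workers[:num_servers], workers[num_servers:]
print('servers:', servers)
print('clients:', clients)
```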
diff --git a/grpc/tools/run_tests/performance/__init__.py b/grpc/tools/run_tests/performance/__init__.py
new file mode 100644
index 00000000..5772620b
--- /dev/null
+++ b/grpc/tools/run_tests/performance/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/grpc/tools/run_tests/performance/bq_upload_result.py b/grpc/tools/run_tests/performance/bq_upload_result.py
new file mode 100755
index 00000000..be1e0c3f
--- /dev/null
+++ b/grpc/tools/run_tests/performance/bq_upload_result.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Uploads a performance benchmark result file to BigQuery.
+
+from __future__ import print_function
+
+import argparse
+import calendar
+import json
+import os
+import sys
+import time
+import uuid
+import massage_qps_stats
+
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+import big_query_utils
+
+_PROJECT_ID = 'grpc-testing'
+
+
+def _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, result_file):
+ with open(result_file, 'r') as f:
+ (col1, col2, col3) = f.read().split(',')
+ latency50 = float(col1.strip()) * 1000
+ latency90 = float(col2.strip()) * 1000
+ latency99 = float(col3.strip()) * 1000
+
+ scenario_result = {
+ 'scenario': {
+ 'name': 'netperf_tcp_rr'
+ },
+ 'summary': {
+ 'latency50': latency50,
+ 'latency90': latency90,
+ 'latency99': latency99
+ }
+ }
+
+ bq = big_query_utils.create_big_query()
+ _create_results_table(bq, dataset_id, table_id)
+
+ if not _insert_result(
+ bq, dataset_id, table_id, scenario_result, flatten=False):
+ print('Error uploading result to bigquery.')
+ sys.exit(1)
+
+
+def _upload_scenario_result_to_bigquery(dataset_id, table_id, result_file):
+ with open(result_file, 'r') as f:
+ scenario_result = json.loads(f.read())
+
+ bq = big_query_utils.create_big_query()
+ _create_results_table(bq, dataset_id, table_id)
+
+ if not _insert_result(bq, dataset_id, table_id, scenario_result):
+ print('Error uploading result to bigquery.')
+ sys.exit(1)
+
+
+def _insert_result(bq, dataset_id, table_id, scenario_result, flatten=True):
+ if flatten:
+ _flatten_result_inplace(scenario_result)
+ _populate_metadata_inplace(scenario_result)
+ row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
+ return big_query_utils.insert_rows(bq, _PROJECT_ID, dataset_id, table_id,
+ [row])
+
+
+def _create_results_table(bq, dataset_id, table_id):
+ with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+ 'r') as f:
+ table_schema = json.loads(f.read())
+ desc = 'Results of performance benchmarks.'
+ return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id, table_id,
+ table_schema, desc)
+
+
+def _flatten_result_inplace(scenario_result):
+ """Bigquery is not really great for handling deeply nested data
+ and repeated fields. To maintain values of some fields while keeping
+ the schema relatively simple, we artificially leave some of the fields
+ as JSON strings.
+ """
+ scenario_result['scenario']['clientConfig'] = json.dumps(
+ scenario_result['scenario']['clientConfig'])
+ scenario_result['scenario']['serverConfig'] = json.dumps(
+ scenario_result['scenario']['serverConfig'])
+ scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+ scenario_result['serverCpuStats'] = []
+ for stats in scenario_result['serverStats']:
+ scenario_result['serverCpuStats'].append(dict())
+ scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop(
+ 'totalCpuTime', None)
+ scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop(
+ 'idleCpuTime', None)
+ for stats in scenario_result['clientStats']:
+ stats['latencies'] = json.dumps(stats['latencies'])
+ stats.pop('requestResults', None)
+ scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+ scenario_result['clientSuccess'] = json.dumps(
+ scenario_result['clientSuccess'])
+ scenario_result['serverSuccess'] = json.dumps(
+ scenario_result['serverSuccess'])
+ scenario_result['requestResults'] = json.dumps(
+ scenario_result.get('requestResults', []))
+ scenario_result['serverCpuUsage'] = scenario_result['summary'].pop(
+ 'serverCpuUsage', None)
+ scenario_result['summary'].pop('successfulRequestsPerSecond', None)
+ scenario_result['summary'].pop('failedRequestsPerSecond', None)
+ massage_qps_stats.massage_qps_stats(scenario_result)
+
+
+def _populate_metadata_inplace(scenario_result):
+ """Populates metadata based on environment variables set by Jenkins."""
+ # NOTE: Grabbing the Kokoro environment variables will only work if the
+ # driver is running locally on the same machine where Kokoro has started
+ # the job. For our setup, this is currently the case, so just assume that.
+ build_number = os.getenv('KOKORO_BUILD_NUMBER')
+ build_url = 'https://source.cloud.google.com/results/invocations/%s' % os.getenv(
+ 'KOKORO_BUILD_ID')
+ job_name = os.getenv('KOKORO_JOB_NAME')
+ git_commit = os.getenv('KOKORO_GIT_COMMIT')
+ # actual commit is the actual head of PR that is getting tested
+ # TODO(jtattermusch): unclear how to obtain on Kokoro
+ git_actual_commit = os.getenv('ghprbActualCommit')
+
+ utc_timestamp = str(calendar.timegm(time.gmtime()))
+ metadata = {'created': utc_timestamp}
+
+ if build_number:
+ metadata['buildNumber'] = build_number
+ if build_url:
+ metadata['buildUrl'] = build_url
+ if job_name:
+ metadata['jobName'] = job_name
+ if git_commit:
+ metadata['gitCommit'] = git_commit
+ if git_actual_commit:
+ metadata['gitActualCommit'] = git_actual_commit
+
+ scenario_result['metadata'] = metadata
+
+
+argp = argparse.ArgumentParser(description='Upload result to big query.')
+argp.add_argument('--bq_result_table',
+ required=True,
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to upload results to.')
+argp.add_argument('--file_to_upload',
+ default='scenario_result.json',
+ type=str,
+ help='Report file to upload.')
+argp.add_argument('--file_format',
+ choices=['scenario_result', 'netperf_latency_csv'],
+ default='scenario_result',
+ help='Format of the file to upload.')
+
+args = argp.parse_args()
+
+dataset_id, table_id = args.bq_result_table.split('.', 2)
+
+if args.file_format == 'netperf_latency_csv':
+ _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id,
+ args.file_to_upload)
+else:
+ _upload_scenario_result_to_bigquery(dataset_id, table_id,
+ args.file_to_upload)
+print('Successfully uploaded %s to BigQuery.\n' % args.file_to_upload)
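
A toy illustration of the flattening strategy `_flatten_result_inplace` applies above: nested or repeated fields are serialized to JSON strings so the BigQuery schema can stay flat (the sample dict is made up):

```python
# Flatten nested/repeated fields into JSON strings, as the uploader does.
import json

scenario_result = {
    'scenario': {'name': 'demo', 'clientConfig': {'rpcs': 10}},
    'latencies': [1.2, 3.4, 5.6],
}

scenario_result['scenario']['clientConfig'] = json.dumps(
    scenario_result['scenario']['clientConfig'])
scenario_result['latencies'] = json.dumps(scenario_result['latencies'])

print(scenario_result)  # nested values are now flat JSON strings
```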
diff --git a/grpc/tools/run_tests/performance/build_performance.sh b/grpc/tools/run_tests/performance/build_performance.sh
new file mode 100755
index 00000000..bb48c3f4
--- /dev/null
+++ b/grpc/tools/run_tests/performance/build_performance.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# shellcheck disable=SC1090
+source ~/.rvm/scripts/rvm
+set -ex
+
+cd "$(dirname "$0")/../../.."
+bazel=$(pwd)/tools/bazel
+
+CONFIG=${CONFIG:-opt}
+
+# build C++ qps worker & driver always - we need at least the driver to
+# run any of the scenarios.
+# TODO(jtattermusch): C++ worker and driver are not buildable on Windows yet
+if [ "$OSTYPE" != "msys" ]
+then
+ # build C++ with cmake as building with "make" disables boringssl assembly
+ # optimizations that can have huge impact on secure channel throughput.
+ mkdir -p cmake/build
+ cd cmake/build
+ cmake -DgRPC_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Release ../..
+ make qps_worker qps_json_driver -j8
+ cd ../..
+ # unbreak subsequent make builds by restoring zconf.h (previously renamed by cmake build)
+ # See https://github.com/grpc/grpc/issues/11581
+ (cd third_party/zlib; git checkout zconf.h)
+fi
+
+PHP_ALREADY_BUILT=""
+for language in "$@"
+do
+ case "$language" in
+ "c++")
+ ;; # C++ has already been built.
+ "java")
+ (cd ../grpc-java/ &&
+ ./gradlew -PskipCodegen=true -PskipAndroid=true :grpc-benchmarks:installDist)
+ ;;
+ "go")
+ tools/run_tests/performance/build_performance_go.sh
+ ;;
+ "php7"|"php7_protobuf_c")
+ if [ -n "$PHP_ALREADY_BUILT" ]; then
+ echo "Skipping PHP build as already built by $PHP_ALREADY_BUILT"
+ else
+ PHP_ALREADY_BUILT=$language
+ tools/run_tests/performance/build_performance_php7.sh
+ fi
+ ;;
+ "csharp")
+ python tools/run_tests/run_tests.py -l "$language" -c "$CONFIG" --build_only -j 8
+ # unbreak subsequent make builds by restoring zconf.h (previously renamed by cmake portion of C#'s build)
+ # See https://github.com/grpc/grpc/issues/11581
+ (cd third_party/zlib; git checkout zconf.h)
+ ;;
+ "node"|"node_purejs")
+ tools/run_tests/performance/build_performance_node.sh
+ ;;
+ "python")
+ $bazel build -c opt //src/python/grpcio_tests/tests/qps:qps_worker
+ ;;
+ "python_asyncio")
+ $bazel build -c opt //src/python/grpcio_tests/tests_aio/benchmark:worker
+ ;;
+ "rust")
+ (cd ../grpc-rs/ && cargo build -p benchmark --release)
+ ;;
+ *)
+ python tools/run_tests/run_tests.py -l "$language" -c "$CONFIG" --build_only -j 8
+ ;;
+ esac
+done
diff --git a/grpc/tools/run_tests/performance/build_performance_go.sh b/grpc/tools/run_tests/performance/build_performance_go.sh
new file mode 100755
index 00000000..3aa203a6
--- /dev/null
+++ b/grpc/tools/run_tests/performance/build_performance_go.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+GOPATH=$(pwd)/../gopath
+export GOPATH
+
+# Get grpc-go and the dependencies but get rid of the upstream/master version
+go get google.golang.org/grpc
+rm -rf "${GOPATH}/src/google.golang.org/grpc"
+
+# Get the revision of grpc-go we want to test
+git clone --recursive ../grpc-go "${GOPATH}/src/google.golang.org/grpc"
+
+(cd "${GOPATH}/src/google.golang.org/grpc/benchmark/worker" && go install)
diff --git a/grpc/tools/run_tests/performance/build_performance_node.sh b/grpc/tools/run_tests/performance/build_performance_node.sh
new file mode 100755
index 00000000..b5b5d9a1
--- /dev/null
+++ b/grpc/tools/run_tests/performance/build_performance_node.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set +ex
+
+# shellcheck disable=SC1090
+. "$HOME/.nvm/nvm.sh"
+
+nvm install 10
+
+set -ex
+
+cd "$(dirname "$0")/../../../../grpc-node"
+
+npm install
+
+./node_modules/.bin/gulp setup
diff --git a/grpc/tools/run_tests/performance/build_performance_php7.sh b/grpc/tools/run_tests/performance/build_performance_php7.sh
new file mode 100755
index 00000000..386c7862
--- /dev/null
+++ b/grpc/tools/run_tests/performance/build_performance_php7.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+CONFIG=${CONFIG:-opt}
+python tools/run_tests/run_tests.py -l php7 -c "$CONFIG" --build_only -j 8
+
+# Set up all dependencies needed for the PHP QPS test
+cd src/php/tests/qps
+composer install
+# Install protobuf C-extension for php
+cd ../../../../third_party/protobuf/php/ext/google/protobuf
+phpize
+./configure
+make
+
diff --git a/grpc/tools/run_tests/performance/kill_workers.sh b/grpc/tools/run_tests/performance/kill_workers.sh
new file mode 100755
index 00000000..95a5bf5d
--- /dev/null
+++ b/grpc/tools/run_tests/performance/kill_workers.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+# Make sure there are no pre-existing QPS workers around before starting
+# the performance test suite
+
+# C++
+killall -9 qps_worker || true
+
+# C#
+# shellcheck disable=SC2009
+ps -C mono -o pid=,cmd= | grep QpsWorker | awk '{print $1}' | xargs kill -9 || true
+# shellcheck disable=SC2009
+ps -C dotnet -o pid=,cmd= | grep QpsWorker | awk '{print $1}' | xargs kill -9 || true
+
+# Ruby
+# shellcheck disable=SC2009
+ps -C ruby -o pid=,cmd= | grep 'qps/worker.rb' | awk '{print $1}' | xargs kill -9 || true
+
+# Python
+# shellcheck disable=SC2009
+ps -C python -o pid=,cmd= | grep 'qps_worker.py' | awk '{print $1}' | xargs kill -9 || true
+
+# Java
+jps | grep LoadWorker | awk '{print $1}' | xargs kill -9 || true
+
+# Go
+killall -9 worker || true
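
The `ps | grep | awk | xargs kill -9` pipelines above all do the same thing: find processes by command line and SIGKILL them. A rough Python equivalent that scans `/proc` (Linux-only, illustrative):

```python
# Find processes whose command line contains a substring and SIGKILL them.
import os
import signal

def kill_matching(substring):
    for pid in filter(str.isdigit, os.listdir('/proc')):
        try:
            with open('/proc/%s/cmdline' % pid, 'rb') as f:
                cmdline = f.read().replace(b'\0', b' ').decode('utf-8', 'replace')
            if substring in cmdline:
                os.kill(int(pid), signal.SIGKILL)
        except (IOError, OSError):
            continue  # process already exited; mirrors the `|| true` above

kill_matching('qps_worker')
```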
diff --git a/grpc/tools/run_tests/performance/massage_qps_stats.py b/grpc/tools/run_tests/performance/massage_qps_stats.py
new file mode 100644
index 00000000..57de77a7
--- /dev/null
+++ b/grpc/tools/run_tests/performance/massage_qps_stats.py
@@ -0,0 +1,511 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Autogenerated by tools/codegen/core/gen_stats_data.py
+
+import massage_qps_stats_helpers
+
+
+def massage_qps_stats(scenario_result):
+ for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
+ if "coreStats" in stats:
+ # Get rid of the "coreStats" element and replace it by statistics
+ # that correspond to columns in the bigquery schema.
+ core_stats = stats["coreStats"]
+ del stats["coreStats"]
+ stats[
+ "core_client_calls_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_calls_created")
+ stats[
+ "core_server_calls_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_calls_created")
+ stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "cqs_created")
+ stats[
+ "core_client_channels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_channels_created")
+ stats[
+ "core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "client_subchannels_created")
+ stats[
+ "core_server_channels_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_channels_created")
+ stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_poll")
+ stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_wait")
+ stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick")
+ stats[
+ "core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kicked_without_poller")
+ stats[
+ "core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kicked_again")
+ stats[
+ "core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_wakeup_fd")
+ stats[
+ "core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_wakeup_cv")
+ stats[
+ "core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_kick_own_thread")
+ stats["core_syscall_epoll_ctl"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_epoll_ctl")
+ stats[
+ "core_pollset_fd_cache_hits"] = massage_qps_stats_helpers.counter(
+ core_stats, "pollset_fd_cache_hits")
+ stats[
+ "core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
+ core_stats, "histogram_slow_lookups")
+ stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_write")
+ stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
+ core_stats, "syscall_read")
+ stats[
+ "core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
+ core_stats, "tcp_backup_pollers_created")
+ stats[
+ "core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
+ core_stats, "tcp_backup_poller_polls")
+ stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_batches")
+ stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_cancel")
+ stats[
+ "core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_initial_metadata")
+ stats[
+ "core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_message")
+ stats[
+ "core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_send_trailing_metadata")
+ stats[
+ "core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_initial_metadata")
+ stats[
+ "core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_message")
+ stats[
+ "core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_op_recv_trailing_metadata")
+ stats[
+ "core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_settings_writes")
+ stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_pings_sent")
+ stats[
+ "core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_begun")
+ stats[
+ "core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_offloaded")
+ stats[
+ "core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_writes_continued")
+ stats[
+ "core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_partial_writes")
+ stats[
+ "core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_initial_write")
+ stats[
+ "core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_start_new_stream")
+ stats[
+ "core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_send_message")
+ stats[
+ "core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_send_initial_metadata")
+ stats[
+ "core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_send_trailing_metadata")
+ stats[
+ "core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_retry_send_ping")
+ stats[
+ "core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_continue_pings")
+ stats[
+ "core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_goaway_sent")
+ stats[
+ "core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_rst_stream")
+ stats[
+ "core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_close_from_api")
+ stats[
+ "core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_stream_flow_control")
+ stats[
+ "core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_transport_flow_control")
+ stats[
+ "core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_send_settings")
+ stats[
+ "core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_bdp_estimator_ping")
+ stats[
+ "core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_flow_control_unstalled_by_setting"
+ )
+ stats[
+ "core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_flow_control_unstalled_by_update"
+ )
+ stats[
+ "core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_application_ping")
+ stats[
+ "core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_keepalive_ping")
+ stats[
+ "core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
+ core_stats,
+ "http2_initiate_write_due_to_transport_flow_control_unstalled"
+ )
+ stats[
+ "core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_ping_response")
+ stats[
+ "core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_initiate_write_due_to_force_rst_stream")
+ stats[
+ "core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
+ core_stats, "http2_spurious_writes_begun")
+ stats[
+ "core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_indexed")
+ stats[
+ "core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_incidx")
+ stats[
+ "core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_incidx_v")
+ stats[
+ "core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_notidx")
+ stats[
+ "core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_notidx_v")
+ stats[
+ "core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_nvridx")
+ stats[
+ "core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_lithdr_nvridx_v")
+ stats[
+ "core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_uncompressed")
+ stats[
+ "core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_huffman")
+ stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_binary")
+ stats[
+ "core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_recv_binary_base64")
+ stats[
+ "core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_indexed")
+ stats[
+ "core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_incidx")
+ stats[
+ "core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_incidx_v")
+ stats[
+ "core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_notidx")
+ stats[
+ "core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_notidx_v")
+ stats[
+ "core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_nvridx")
+ stats[
+ "core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_lithdr_nvridx_v")
+ stats[
+ "core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_uncompressed")
+ stats[
+ "core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_huffman")
+ stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_binary")
+ stats[
+ "core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
+ core_stats, "hpack_send_binary_base64")
+ stats[
+ "core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_initiated")
+ stats[
+ "core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_scheduled_items")
+ stats[
+ "core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_scheduled_final_items")
+ stats[
+ "core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
+ core_stats, "combiner_locks_offloaded")
+ stats[
+ "core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_locks_initiated")
+ stats[
+ "core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_locks_scheduled_items")
+ stats[
+ "core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_set_notify_on_cancel")
+ stats[
+ "core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
+ core_stats, "call_combiner_cancelled")
+ stats[
+ "core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_short_items")
+ stats[
+ "core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_long_items")
+ stats[
+ "core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_scheduled_to_self")
+ stats[
+ "core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_wakeup_initiated")
+ stats[
+ "core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_queue_drained")
+ stats[
+ "core_executor_push_retries"] = massage_qps_stats_helpers.counter(
+ core_stats, "executor_push_retries")
+ stats[
+ "core_server_requested_calls"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_requested_calls")
+ stats[
+ "core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
+ core_stats, "server_slowpath_requests_queued")
+ stats[
+ "core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_trylock_failures")
+ stats[
+ "core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_trylock_successes")
+ stats[
+ "core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
+ core_stats, "cq_ev_queue_transient_pop_failures")
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "call_initial_size")
+ stats["core_call_initial_size"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_call_initial_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "poll_events_returned")
+ stats["core_poll_events_returned"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_poll_events_returned_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "tcp_write_size")
+ stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_write_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "tcp_write_iov_size")
+ stats["core_tcp_write_iov_size"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_tcp_write_iov_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
+ stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_read_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "tcp_read_offer")
+ stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
+ stats["core_tcp_read_offer_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "tcp_read_offer_iov_size")
+ stats["core_tcp_read_offer_iov_size"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "http2_send_message_size")
+ stats["core_http2_send_message_size"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_message_size_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(
+ core_stats, "http2_send_initial_metadata_per_write")
+ stats["core_http2_send_initial_metadata_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(
+ core_stats, "http2_send_message_per_write")
+ stats["core_http2_send_message_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_message_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(
+ core_stats, "http2_send_trailing_metadata_per_write")
+ stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(
+ core_stats, "http2_send_flowctl_per_write")
+ stats["core_http2_send_flowctl_per_write"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
+ h = massage_qps_stats_helpers.histogram(core_stats,
+ "server_cqs_checked")
+ stats["core_server_cqs_checked"] = ",".join(
+ "%f" % x for x in h.buckets)
+ stats["core_server_cqs_checked_bkts"] = ",".join(
+ "%f" % x for x in h.boundaries)
+ stats[
+ "core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 50, h.boundaries)
+ stats[
+ "core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 95, h.boundaries)
+ stats[
+ "core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
+ h.buckets, 99, h.boundaries)
diff --git a/grpc/tools/run_tests/performance/massage_qps_stats_helpers.py b/grpc/tools/run_tests/performance/massage_qps_stats_helpers.py
new file mode 100644
index 00000000..108451cd
--- /dev/null
+++ b/grpc/tools/run_tests/performance/massage_qps_stats_helpers.py
@@ -0,0 +1,62 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+
+
+def _threshold_for_count_below(buckets, boundaries, count_below):
+ count_so_far = 0
+ for lower_idx in range(0, len(buckets)):
+ count_so_far += buckets[lower_idx]
+ if count_so_far >= count_below:
+ break
+ if count_so_far == count_below:
+ # this bucket hits the threshold exactly... we should be midway through
+ # any run of zero values following the bucket
+ for upper_idx in range(lower_idx + 1, len(buckets)):
+ if buckets[upper_idx] != 0:
+ break
+ return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
+ else:
+ # treat values as uniform throughout the bucket, and find where this value
+ # should lie
+ lower_bound = boundaries[lower_idx]
+ upper_bound = boundaries[lower_idx + 1]
+ return (upper_bound - (upper_bound - lower_bound) *
+ (count_so_far - count_below) / float(buckets[lower_idx]))
+
+
+def percentile(buckets, pctl, boundaries):
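+    # Example: with buckets=[4, 4, 0] and boundaries=[0, 10, 20],
+    # percentile(buckets, 50, boundaries) lands exactly on the first
+    # bucket's cumulative count and returns the midpoint 5.0, while
+    # percentile(buckets, 75, boundaries) interpolates within the second
+    # bucket and returns 15.0.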
+ return _threshold_for_count_below(buckets, boundaries,
+ sum(buckets) * pctl / 100.0)
+
+
+def counter(core_stats, name):
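+    # Linear scan over the metrics list; implicitly returns None when no
+    # metric with the given name is present.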
+ for stat in core_stats['metrics']:
+ if stat['name'] == name:
+ return int(stat.get('count', 0))
+
+
+Histogram = collections.namedtuple('Histogram', 'buckets boundaries')
+
+
+def histogram(core_stats, name):
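+    # e.g. a metric {'name': 'tcp_write_size', 'histogram': {'buckets':
+    # [{'count': 3, 'start': 0}, {'count': 1, 'start': 10}]}} yields
+    # Histogram(buckets=[3, 1], boundaries=[0, 10]).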
+ for stat in core_stats['metrics']:
+ if stat['name'] == name:
+ buckets = []
+ boundaries = []
+ for b in stat['histogram']['buckets']:
+ buckets.append(int(b.get('count', 0)))
+ boundaries.append(int(b.get('start', 0)))
+ return Histogram(buckets=buckets, boundaries=boundaries)
diff --git a/grpc/tools/run_tests/performance/patch_scenario_results_schema.py b/grpc/tools/run_tests/performance/patch_scenario_results_schema.py
new file mode 100755
index 00000000..694e16af
--- /dev/null
+++ b/grpc/tools/run_tests/performance/patch_scenario_results_schema.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use this to patch the schema of existing scenario results tables
+# (e.g. after adding fields).
+
+from __future__ import print_function
+
+import argparse
+import calendar
+import json
+import os
+import sys
+import time
+import uuid
+
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+import big_query_utils
+
+_PROJECT_ID = 'grpc-testing'
+
+
+def _patch_results_table(dataset_id, table_id):
+ bq = big_query_utils.create_big_query()
+ with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+ 'r') as f:
+ table_schema = json.loads(f.read())
+ desc = 'Results of performance benchmarks.'
+ return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id, table_id,
+ table_schema)
+
+
+argp = argparse.ArgumentParser(
+ description='Patch schema of scenario results table.')
+argp.add_argument('--bq_result_table',
+ required=True,
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to patch.')
+
+args = argp.parse_args()
+
+dataset_id, table_id = args.bq_result_table.split('.', 2)
+
+_patch_results_table(dataset_id, table_id)
+print('Successfully patched schema of %s.\n' % args.bq_result_table)
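+
+# Example invocation (hypothetical table name):
+#   tools/run_tests/performance/patch_scenario_results_schema.py \
+#       --bq_result_table=performance_results.scenario_results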
diff --git a/grpc/tools/run_tests/performance/process_local_perf_flamegraphs.sh b/grpc/tools/run_tests/performance/process_local_perf_flamegraphs.sh
new file mode 100755
index 00000000..b7b05fde
--- /dev/null
+++ b/grpc/tools/run_tests/performance/process_local_perf_flamegraphs.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
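+
+# Expected environment (assumed example values):
+#   OUTPUT_DIR=flamegraphs PERF_BASE_NAME=qps_worker_0 OUTPUT_FILENAME=qps_worker_0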
+
+mkdir -p "$OUTPUT_DIR"
+
+PERF_DATA_FILE="${PERF_BASE_NAME}-perf.data"
+PERF_SCRIPT_OUTPUT="${PERF_BASE_NAME}-out.perf"
+
+# Generate Flame graphs
+echo "running perf script on $PERF_DATA_FILE"
+perf script -i "$PERF_DATA_FILE" > "$PERF_SCRIPT_OUTPUT"
+
+# use https://github.com/brendangregg/FlameGraph
+~/FlameGraph/stackcollapse-perf.pl "$PERF_SCRIPT_OUTPUT" | ~/FlameGraph/flamegraph.pl > "${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg"
diff --git a/grpc/tools/run_tests/performance/process_remote_perf_flamegraphs.sh b/grpc/tools/run_tests/performance/process_remote_perf_flamegraphs.sh
new file mode 100755
index 00000000..6e42564d
--- /dev/null
+++ b/grpc/tools/run_tests/performance/process_remote_perf_flamegraphs.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+mkdir -p "$OUTPUT_DIR"
+
+PERF_DATA_FILE="${PERF_BASE_NAME}-perf.data"
+PERF_SCRIPT_OUTPUT="${PERF_BASE_NAME}-out.perf"
+
+# Generate Flame graphs
+echo "running perf script on $USER_AT_HOST with perf.data"
+# shellcheck disable=SC2029
+ssh "$USER_AT_HOST" "cd ~/performance_workspace/grpc && perf script -i $PERF_DATA_FILE | gzip > ${PERF_SCRIPT_OUTPUT}.gz"
+
+scp "$USER_AT_HOST:~/performance_workspace/grpc/$PERF_SCRIPT_OUTPUT.gz" .
+
+gzip -d -f "$PERF_SCRIPT_OUTPUT.gz"
+
+# use https://github.com/brendangregg/FlameGraph
+~/FlameGraph/stackcollapse-perf.pl --kernel "$PERF_SCRIPT_OUTPUT" | ~/FlameGraph/flamegraph.pl --color=java --hash > "${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg"
diff --git a/grpc/tools/run_tests/performance/remote_host_build.sh b/grpc/tools/run_tests/performance/remote_host_build.sh
new file mode 100755
index 00000000..862bd6c0
--- /dev/null
+++ b/grpc/tools/run_tests/performance/remote_host_build.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+# execute the build script remotely
+# shellcheck disable=SC2029
+ssh "${USER_AT_HOST}" "CONFIG=${CONFIG} ~/performance_workspace/grpc/tools/run_tests/performance/build_performance.sh $*"
diff --git a/grpc/tools/run_tests/performance/remote_host_prepare.sh b/grpc/tools/run_tests/performance/remote_host_prepare.sh
new file mode 100755
index 00000000..685c4357
--- /dev/null
+++ b/grpc/tools/run_tests/performance/remote_host_prepare.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+# TODO(jtattermusch): To be sure there are no running processes that would
+# mess with the results, be rough and reboot the slave here
+# and wait for it to come back online.
+ssh "${USER_AT_HOST}" "killall -9 qps_worker dotnet mono node ruby worker || true"
+
+# On Windows, killall is not supported, so kill all pending workers
+# explicitly before attempting to delete the workspace.
+ssh "${USER_AT_HOST}" "ps -e | egrep 'qps_worker|dotnet' | awk '{print \$1}' | xargs kill -9 || true"
+
+# cleanup after previous builds
+ssh "${USER_AT_HOST}" "rm -rf ~/performance_workspace && mkdir -p ~/performance_workspace"
+
+# push the current sources to the slave and unpack them.
+scp ../grpc.tar "${USER_AT_HOST}:~/performance_workspace"
+# Windows workaround: attempt to untar twice; the first run is going to
+# fail with symlink creation error(s).
+ssh "${USER_AT_HOST}" "tar -xf ~/performance_workspace/grpc.tar -C ~/performance_workspace || tar -xf ~/performance_workspace/grpc.tar -C ~/performance_workspace"
+
+# For consistency with local run, invoke the kill_workers script remotely.
+# shellcheck disable=SC2088
+ssh "${USER_AT_HOST}" "~/performance_workspace/grpc/tools/run_tests/performance/kill_workers.sh"
+
+# make sure the port server is running (required by C++ qps_worker)
+ssh "${USER_AT_HOST}" "cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py"
diff --git a/grpc/tools/run_tests/performance/run_netperf.sh b/grpc/tools/run_tests/performance/run_netperf.sh
new file mode 100755
index 00000000..2a32051d
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_netperf.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
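+# Example (assumed values):
+#   NETPERF_SERVER_HOST=10.0.0.2 BQ_RESULT_TABLE=performance_results.netperf \
+#     tools/run_tests/performance/run_netperf.sh
+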
+netperf >netperf_latency.txt -P 0 -t TCP_RR -H "$NETPERF_SERVER_HOST" -- -r 1,1 -o P50_LATENCY,P90_LATENCY,P99_LATENCY
+
+cat netperf_latency.txt
+
+if [ "$BQ_RESULT_TABLE" != "" ]
+then
+ tools/run_tests/performance/bq_upload_result.py \
+ --file_to_upload=netperf_latency.txt \
+ --file_format=netperf_latency_csv \
+ --bq_result_table="$BQ_RESULT_TABLE"
+fi
diff --git a/grpc/tools/run_tests/performance/run_qps_driver.sh b/grpc/tools/run_tests/performance/run_qps_driver.sh
new file mode 100755
index 00000000..47a03db0
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_qps_driver.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
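+# Arguments are passed straight through to qps_json_driver.
+# Example (--scenarios_file is an assumed driver flag):
+#   tools/run_tests/performance/run_qps_driver.sh --scenarios_file=scenarios.json
+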
+cmake/build/qps_json_driver "$@"
+
+if [ "$BQ_RESULT_TABLE" != "" ]
+then
+ tools/run_tests/performance/bq_upload_result.py --bq_result_table="$BQ_RESULT_TABLE"
+fi
diff --git a/grpc/tools/run_tests/performance/run_worker_csharp.sh b/grpc/tools/run_tests/performance/run_worker_csharp.sh
new file mode 100755
index 00000000..af944f9f
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_csharp.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+# needed to correctly locate testca
+cd src/csharp/Grpc.IntegrationTesting.QpsWorker/bin/Release/netcoreapp2.1
+
+dotnet exec Grpc.IntegrationTesting.QpsWorker.dll "$@"
diff --git a/grpc/tools/run_tests/performance/run_worker_go.sh b/grpc/tools/run_tests/performance/run_worker_go.sh
new file mode 100755
index 00000000..1127f4f2
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_go.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+GOPATH=$(pwd)/../gopath
+export GOPATH
+
+"${GOPATH}/bin/worker" "$@"
diff --git a/grpc/tools/run_tests/performance/run_worker_java.sh b/grpc/tools/run_tests/performance/run_worker_java.sh
new file mode 100755
index 00000000..cff6faf6
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_java.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# Enter repo root
+cd "$(dirname "$0")/../../.."
+
+# Enter the grpc-java repo root (expected to be next to grpc repo root)
+cd ../grpc-java
+
+benchmarks/build/install/grpc-benchmarks/bin/benchmark_worker "$@"
diff --git a/grpc/tools/run_tests/performance/run_worker_node.sh b/grpc/tools/run_tests/performance/run_worker_node.sh
new file mode 100755
index 00000000..658cd508
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_node.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# shellcheck disable=SC1090
+. "$HOME/.nvm/nvm.sh"
+
+nvm use 10
+
+set -ex
+
+fixture=$1
+
+shift
+
+# Enter repo root
+cd "$(dirname "$0")/../../.."
+
+# Enter the grpc-node repo root (expected to be next to grpc repo root)
+cd ../grpc-node
+
+node -r "./test/fixtures/$fixture" test/performance/worker.js "$@"
diff --git a/grpc/tools/run_tests/performance/run_worker_php.sh b/grpc/tools/run_tests/performance/run_worker_php.sh
new file mode 100755
index 00000000..97292a99
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_php.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# shellcheck disable=SC1090
+source ~/.rvm/scripts/rvm
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+# The proxy worker for PHP is implemented in Ruby
+ruby src/ruby/qps/proxy-worker.rb "$@"
diff --git a/grpc/tools/run_tests/performance/run_worker_python.sh b/grpc/tools/run_tests/performance/run_worker_python.sh
new file mode 100755
index 00000000..674ae279
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_python.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+bazel-bin/src/python/grpcio_tests/tests/qps/qps_worker "$@"
diff --git a/grpc/tools/run_tests/performance/run_worker_python_asyncio.sh b/grpc/tools/run_tests/performance/run_worker_python_asyncio.sh
new file mode 100755
index 00000000..7bf8d2f3
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_python_asyncio.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2020 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+bazel-bin/src/python/grpcio_tests/tests_aio/benchmark/worker "$@"
diff --git a/grpc/tools/run_tests/performance/run_worker_ruby.sh b/grpc/tools/run_tests/performance/run_worker_ruby.sh
new file mode 100755
index 00000000..ed448463
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_ruby.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# shellcheck disable=SC1090
+source ~/.rvm/scripts/rvm
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+ruby src/ruby/qps/worker.rb "$@"
diff --git a/grpc/tools/run_tests/performance/run_worker_rust.sh b/grpc/tools/run_tests/performance/run_worker_rust.sh
new file mode 100755
index 00000000..7e2b4ed4
--- /dev/null
+++ b/grpc/tools/run_tests/performance/run_worker_rust.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 2016 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+cd "$(dirname "$0")/../../.."
+
+../grpc-rs/target/release/qps_worker "$@"
diff --git a/grpc/tools/run_tests/performance/scenario_config.py b/grpc/tools/run_tests/performance/scenario_config.py
new file mode 100644
index 00000000..9368d40d
--- /dev/null
+++ b/grpc/tools/run_tests/performance/scenario_config.py
@@ -0,0 +1,1490 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# performance scenario configuration for various languages
+
+import math
+
+WARMUP_SECONDS = 5
+JAVA_WARMUP_SECONDS = 15 # Java needs more warmup time for JIT to kick in.
+BENCHMARK_SECONDS = 30
+
+SMOKETEST = 'smoketest'
+SCALABLE = 'scalable'
+INPROC = 'inproc'
+SWEEP = 'sweep'
+DEFAULT_CATEGORIES = [SCALABLE, SMOKETEST]
+
+SECURE_SECARGS = {
+ 'use_test_ca': True,
+ 'server_host_override': 'foo.test.google.fr'
+}
+
+HISTOGRAM_PARAMS = {
+ 'resolution': 0.01,
+ 'max_possible': 60e9,
+}
+
+# target number of RPCs outstanding across all client channels in
+# non-ping-pong tests (since we can only specify per-channel numbers, the
+# actual target will be slightly higher)
+OUTSTANDING_REQUESTS = {'async': 6400, 'async-limited': 800, 'sync': 1000}
+
+# wide is the number of client channels in multi-channel tests (1 otherwise)
+WIDE = 64
+
+
+def _get_secargs(is_secure):
+ if is_secure:
+ return SECURE_SECARGS
+ else:
+ return None
+
+
+def remove_nonproto_fields(scenario):
+    """Remove special-purpose fields that carry extra info about the scenario
+    but don't belong to the ScenarioConfig protobuf message."""
+ scenario.pop('CATEGORIES', None)
+ scenario.pop('CLIENT_LANGUAGE', None)
+ scenario.pop('SERVER_LANGUAGE', None)
+ scenario.pop('EXCLUDED_POLL_ENGINES', None)
+ return scenario
+
+
+def geometric_progression(start, stop, step):
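+    # e.g. list(geometric_progression(1, 20, 10)) == [1, 10], and
+    # list(geometric_progression(1, 200, math.sqrt(10))) yields
+    # [1, 3, 10, 32, 100] (values are rounded to ints).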
+ n = start
+ while n < stop:
+ yield int(round(n))
+ n *= step
+
+
+def _payload_type(use_generic_payload, req_size, resp_size):
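+    # e.g. _payload_type(False, 300, 50) returns
+    # {'simple_params': {'req_size': 300, 'resp_size': 50}}; with
+    # use_generic_payload=True the same sizes nest under 'bytebuf_params'.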
+ r = {}
+ sizes = {
+ 'req_size': req_size,
+ 'resp_size': resp_size,
+ }
+ if use_generic_payload:
+ r['bytebuf_params'] = sizes
+ else:
+ r['simple_params'] = sizes
+ return r
+
+
+def _load_params(offered_load):
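+    # _load_params(None) selects closed-loop load ({'closed_loop': {}});
+    # _load_params(37500) returns {'poisson': {'offered_load': 37500}}.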
+ r = {}
+ if offered_load is None:
+ r['closed_loop'] = {}
+ else:
+ load = {}
+ load['offered_load'] = offered_load
+ r['poisson'] = load
+ return r
+
+
+def _add_channel_arg(config, key, value):
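+    # e.g. _add_channel_arg(cfg, 'grpc.minimal_stack', 1) appends
+    # {'name': 'grpc.minimal_stack', 'int_value': 1} to cfg['channel_args'],
+    # creating the list first if it is missing.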
+ if 'channel_args' in config:
+ channel_args = config['channel_args']
+ else:
+ channel_args = []
+ config['channel_args'] = channel_args
+ arg = {'name': key}
+ if isinstance(value, int):
+ arg['int_value'] = value
+ else:
+ arg['str_value'] = value
+ channel_args.append(arg)
+
+
+def _ping_pong_scenario(name,
+ rpc_type,
+ client_type,
+ server_type,
+ secure=True,
+ use_generic_payload=False,
+ req_size=0,
+ resp_size=0,
+ unconstrained_client=None,
+ client_language=None,
+ server_language=None,
+ async_server_threads=0,
+ client_processes=0,
+ server_processes=0,
+ server_threads_per_cq=0,
+ client_threads_per_cq=0,
+ warmup_seconds=WARMUP_SECONDS,
+ categories=DEFAULT_CATEGORIES,
+ channels=None,
+ outstanding=None,
+ num_clients=None,
+ resource_quota_size=None,
+ messages_per_stream=None,
+ excluded_poll_engines=[],
+ minimal_stack=False,
+ offered_load=None):
+ """Creates a basic ping pong scenario."""
+ scenario = {
+ 'name': name,
+ 'num_servers': 1,
+ 'num_clients': 1,
+ 'client_config': {
+ 'client_type': client_type,
+ 'security_params': _get_secargs(secure),
+ 'outstanding_rpcs_per_channel': 1,
+ 'client_channels': 1,
+ 'async_client_threads': 1,
+ 'client_processes': client_processes,
+ 'threads_per_cq': client_threads_per_cq,
+ 'rpc_type': rpc_type,
+ 'histogram_params': HISTOGRAM_PARAMS,
+ 'channel_args': [],
+ },
+ 'server_config': {
+ 'server_type': server_type,
+ 'security_params': _get_secargs(secure),
+ 'async_server_threads': async_server_threads,
+ 'server_processes': server_processes,
+ 'threads_per_cq': server_threads_per_cq,
+ 'channel_args': [],
+ },
+ 'warmup_seconds': warmup_seconds,
+ 'benchmark_seconds': BENCHMARK_SECONDS
+ }
+ if resource_quota_size:
+ scenario['server_config']['resource_quota_size'] = resource_quota_size
+ if use_generic_payload:
+ if server_type != 'ASYNC_GENERIC_SERVER':
+ raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
+ scenario['server_config']['payload_config'] = _payload_type(
+ use_generic_payload, req_size, resp_size)
+
+ scenario['client_config']['payload_config'] = _payload_type(
+ use_generic_payload, req_size, resp_size)
+
+    # Optimization target of 'throughput' does not work well with the epoll1
+    # polling engine; ping-pong and offered-load scenarios switch to
+    # 'latency' below.
+ optimization_target = 'throughput'
+
+ if unconstrained_client:
+ outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[
+ unconstrained_client]
+ # clamp buffer usage to something reasonable (16 gig for now)
+ MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
+ if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
+ outstanding_calls = max(1,
+ MAX_MEMORY_USE / max(req_size, resp_size))
+ wide = channels if channels is not None else WIDE
+ deep = int(math.ceil(1.0 * outstanding_calls / wide))
+
+ scenario[
+ 'num_clients'] = num_clients if num_clients is not None else 0 # use as many clients as available.
+ scenario['client_config']['outstanding_rpcs_per_channel'] = deep
+ scenario['client_config']['client_channels'] = wide
+ scenario['client_config']['async_client_threads'] = 0
+ if offered_load is not None:
+ optimization_target = 'latency'
+ else:
+ scenario['client_config']['outstanding_rpcs_per_channel'] = 1
+ scenario['client_config']['client_channels'] = 1
+ scenario['client_config']['async_client_threads'] = 1
+ optimization_target = 'latency'
+
+ scenario['client_config']['load_params'] = _load_params(offered_load)
+
+ optimization_channel_arg = {
+ 'name': 'grpc.optimization_target',
+ 'str_value': optimization_target
+ }
+ scenario['client_config']['channel_args'].append(optimization_channel_arg)
+ scenario['server_config']['channel_args'].append(optimization_channel_arg)
+
+ if minimal_stack:
+ _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
+ _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
+
+ if messages_per_stream:
+ scenario['client_config']['messages_per_stream'] = messages_per_stream
+ if client_language:
+ # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
+ scenario['CLIENT_LANGUAGE'] = client_language
+ if server_language:
+ # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
+ scenario['SERVER_LANGUAGE'] = server_language
+ if categories:
+ scenario['CATEGORIES'] = categories
+ if excluded_poll_engines:
+ # The polling engines for which this scenario is excluded
+ scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
+ return scenario
+
+
+class CXXLanguage:
+
+ def __init__(self):
+ self.safename = 'cxx'
+
+ def worker_cmdline(self):
+ return ['cmake/build/qps_worker']
+
+ def worker_port_offset(self):
+ return 0
+
+ def scenarios(self):
+ # TODO(ctiller): add 70% load latency test
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_1channel_100rpcs_1MB',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ unconstrained_client='async',
+ outstanding=100,
+ channels=1,
+ num_clients=1,
+ secure=False,
+ categories=[INPROC] + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_streaming_from_client_1channel_1MB',
+ rpc_type='STREAMING_FROM_CLIENT',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ unconstrained_client='async',
+ outstanding=1,
+ channels=1,
+ num_clients=1,
+ secure=False,
+ categories=[SMOKETEST] + [INPROC] + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=300,
+ resp_size=50,
+ unconstrained_client='async',
+ outstanding=30000,
+ channels=300,
+ offered_load=37500,
+ secure=False,
+ async_server_threads=16,
+ server_threads_per_cq=1,
+ categories=[SCALABLE])
+
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST] if secure else [])
+ inproc_categories = ([INPROC] if not secure else [])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories + inproc_categories +
+ [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + inproc_categories +
+ [SCALABLE])
+
+ for mps in geometric_progression(1, 20, 10):
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+ (mps, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=smoketest_categories + inproc_categories +
+ [SCALABLE])
+
+ for mps in geometric_progression(1, 200, math.sqrt(10)):
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+ (mps, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=[SWEEP])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
+ rpc_type='STREAMING',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=inproc_categories + [SCALABLE],
+ channels=1,
+ outstanding=100)
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' %
+ secstr,
+ rpc_type='STREAMING',
+ req_size=64 * 1024,
+ resp_size=64 * 1024,
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=inproc_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async-limited',
+ use_generic_payload=True,
+ secure=secure,
+ client_threads_per_cq=1000000,
+ server_threads_per_cq=1000000,
+ categories=smoketest_categories + inproc_categories +
+ [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=inproc_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' %
+ secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async-limited',
+ secure=secure,
+ client_threads_per_cq=1000000,
+ server_threads_per_cq=1000000,
+ categories=inproc_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=inproc_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async-limited',
+ secure=secure,
+ client_threads_per_cq=1000000,
+ server_threads_per_cq=1000000,
+ categories=smoketest_categories + inproc_categories +
+ [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' %
+ secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ client_threads_per_cq=2,
+ server_threads_per_cq=2,
+ categories=inproc_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async-limited',
+ use_generic_payload=True,
+ async_server_threads=1,
+ minimal_stack=not secure,
+ secure=secure)
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s'
+ % (secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + inproc_categories +
+ [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s'
+ % (secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ channels=1,
+ outstanding=64,
+ req_size=128,
+ resp_size=8 * 1024 * 1024,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=inproc_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s'
+ % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + inproc_categories +
+ [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=smoketest_categories + inproc_categories +
+ [SCALABLE])
+
+ for rpc_type in [
+ 'unary', 'streaming', 'streaming_from_client',
+ 'streaming_from_server'
+ ]:
+ for synchronicity in ['sync', 'async']:
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_ping_pong_%s' %
+ (synchronicity, rpc_type, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ async_server_threads=1,
+ minimal_stack=not secure,
+ secure=secure)
+
+ for size in geometric_progression(1, 1024 * 1024 * 1024 + 1,
+ 8):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' %
+ (synchronicity, rpc_type, secstr, size),
+ rpc_type=rpc_type.upper(),
+ req_size=size,
+ resp_size=size,
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=[SWEEP])
+
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s' %
+ (synchronicity, rpc_type, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ server_threads_per_cq=3,
+ client_threads_per_cq=3,
+ categories=inproc_categories + [SCALABLE])
+
+            # TODO(vjpai): Re-enable this test. It times out frequently, and
+            # the cause hasn't yet been conclusively identified as either a
+            # test failure or a race in the library.
+ # yield _ping_pong_scenario(
+ # 'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
+ # rpc_type=rpc_type.upper(),
+ # client_type='%s_CLIENT' % synchronicity.upper(),
+ # server_type='%s_SERVER' % synchronicity.upper(),
+ # unconstrained_client=synchronicity,
+ # secure=secure,
+ # categories=smoketest_categories+[SCALABLE],
+ # resource_quota_size=500*1024)
+
+ if rpc_type == 'streaming':
+ for mps in geometric_progression(1, 20, 10):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+ % (synchronicity, rpc_type, mps, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=inproc_categories + [SCALABLE])
+
+ for mps in geometric_progression(1, 200, math.sqrt(10)):
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+ % (synchronicity, rpc_type, mps, secstr),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ messages_per_stream=mps,
+ minimal_stack=not secure,
+ categories=[SWEEP])
+
+ for channels in geometric_progression(
+ 1, 20000, math.sqrt(10)):
+ for outstanding in geometric_progression(
+ 1, 200000, math.sqrt(10)):
+ if synchronicity == 'sync' and outstanding > 1200:
+ continue
+ if outstanding < channels: continue
+ yield _ping_pong_scenario(
+ 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding'
+ % (synchronicity, rpc_type, secstr, channels,
+ outstanding),
+ rpc_type=rpc_type.upper(),
+ client_type='%s_CLIENT' % synchronicity.upper(),
+ server_type='%s_SERVER' % synchronicity.upper(),
+ unconstrained_client=synchronicity,
+ secure=secure,
+ minimal_stack=not secure,
+ categories=[SWEEP],
+ channels=channels,
+ outstanding=outstanding)
+
+ def __str__(self):
+ return 'c++'
+
+
+class CSharpLanguage:
+
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_csharp.sh']
+
+ def worker_port_offset(self):
+ return 100
+
+ def scenarios(self):
+ yield _ping_pong_scenario('csharp_generic_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_generic_async_streaming_ping_pong_insecure_1MB',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ use_generic_payload=True,
+ secure=False,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_generic_async_streaming_qps_unconstrained_insecure',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=False,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario('csharp_protobuf_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario('csharp_protobuf_async_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_sync_to_async_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_protobuf_async_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario('csharp_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ server_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync',
+ server_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_to_csharp_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ client_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario('csharp_protobuf_async_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
+
+ def __str__(self):
+ return 'csharp'
+
+
+class PythonLanguage:
+
+ def __init__(self):
+ self.safename = 'python'
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_python.sh']
+
+ def worker_port_offset(self):
+ return 500
+
+ def scenarios(self):
+ yield _ping_pong_scenario('python_generic_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario('python_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario('python_protobuf_async_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario('python_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario(
+ 'python_protobuf_sync_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario('python_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=0,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_to_cpp_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario('python_protobuf_sync_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
+
+ def __str__(self):
+ return 'python'
+
+
+class PythonAsyncIOLanguage:
+
+ def __init__(self):
+ self.safename = 'python_asyncio'
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_python_asyncio.sh']
+
+ def worker_port_offset(self):
+ return 1200
+
+ def scenarios(self):
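+        # Sweep the number of outstanding RPCs (64-512) and channels (1, 4)
+        # to see how asyncio throughput scales with concurrency. The "_max"
+        # variants leave client_processes/server_processes at 0 (presumably
+        # "use the framework default"), while the "_1thread" variants pin
+        # both sides to a single process.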
+ for outstanding in [64, 128, 256, 512]:
+ for channels in [1, 4]:
+ yield _ping_pong_scenario(
+ 'python_asyncio_protobuf_async_unary_ping_pong_%dx%d_max' %
+ (
+ outstanding,
+ channels,
+ ),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ outstanding=outstanding * channels,
+ channels=channels,
+ client_processes=0,
+ server_processes=0,
+ unconstrained_client='async',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_protobuf_async_unary_ping_pong_%d_1thread' %
+ outstanding,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ outstanding=outstanding,
+ channels=1,
+ client_processes=1,
+ server_processes=1,
+ unconstrained_client='async',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_generic_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ channels=1,
+ client_processes=1,
+ server_processes=1,
+ use_generic_payload=True,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_protobuf_async_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ channels=1,
+ client_processes=1,
+ server_processes=1,
+ categories=[SMOKETEST, SCALABLE])
+
+        yield _ping_pong_scenario(
+            'python_asyncio_protobuf_async_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            channels=1,
+            client_processes=1,
+            server_processes=1,
+            categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_protobuf_async_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ channels=1,
+ unconstrained_client='async')
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_protobuf_async_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ channels=1,
+ unconstrained_client='async')
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_to_cpp_protobuf_async_unary_ping_pong_1thread',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ channels=1,
+ client_processes=1,
+ unconstrained_client='async',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_to_cpp_protobuf_async_unary_ping_pong_max',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ channels=1,
+ client_processes=0,
+ server_language='c++',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_to_cpp_protobuf_sync_streaming_ping_pong_1thread',
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ channels=1,
+ client_processes=1,
+ server_processes=1,
+ unconstrained_client='async',
+ server_language='c++')
+
+ yield _ping_pong_scenario(
+ 'python_asyncio_protobuf_async_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ channels=1,
+ client_processes=1,
+ server_processes=1,
+ categories=[SMOKETEST, SCALABLE])
+
+ def __str__(self):
+ return 'python_asyncio'
+
+
+class RubyLanguage:
+
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_ruby.sh']
+
+ def worker_port_offset(self):
+ return 300
+
+ def scenarios(self):
+ yield _ping_pong_scenario('ruby_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario('ruby_protobuf_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario('ruby_protobuf_sync_unary_qps_unconstrained',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario(
+ 'ruby_protobuf_sync_streaming_qps_unconstrained',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario('ruby_to_cpp_protobuf_sync_unary_ping_pong',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ 'ruby_to_cpp_protobuf_sync_streaming_ping_pong',
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario('ruby_protobuf_unary_ping_pong_1MB',
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ req_size=1024 * 1024,
+ resp_size=1024 * 1024,
+ categories=[SMOKETEST, SCALABLE])
+
+ def __str__(self):
+ return 'ruby'
+
+
+class Php7Language:
+
+ def __init__(self, php7_protobuf_c=False):
+ self.php7_protobuf_c = php7_protobuf_c
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ if self.php7_protobuf_c:
+ return [
+ 'tools/run_tests/performance/run_worker_php.sh',
+ '--use_protobuf_c_extension'
+ ]
+ return ['tools/run_tests/performance/run_worker_php.sh']
+
+ def worker_port_offset(self):
+ if self.php7_protobuf_c:
+ return 900
+ return 800
+
+ def scenarios(self):
+ php7_extension_mode = 'php7_protobuf_php_extension'
+ if self.php7_protobuf_c:
+ php7_extension_mode = 'php7_protobuf_c_extension'
+
+ yield _ping_pong_scenario('%s_to_cpp_protobuf_sync_unary_ping_pong' %
+ php7_extension_mode,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ server_language='c++',
+ async_server_threads=1)
+
+        # TODO(ddyihai): Investigate why QPS is better with
+        # async_server_threads=1 (CPU usage 340%) than with
+        # async_server_threads=0 (CPU usage 490%).
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_unary_qps_unconstrained' %
+ php7_extension_mode,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ outstanding=1,
+ async_server_threads=1,
+ unconstrained_client='sync')
+
+ yield _ping_pong_scenario(
+ '%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' %
+ php7_extension_mode,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='c++',
+ outstanding=1,
+ async_server_threads=1,
+ unconstrained_client='sync')
+
+ def __str__(self):
+ if self.php7_protobuf_c:
+ return 'php7_protobuf_c'
+ return 'php7'
+
+
+class JavaLanguage:
+
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_java.sh']
+
+ def worker_port_offset(self):
+ return 400
+
+ def scenarios(self):
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+ yield _ping_pong_scenario('java_protobuf_async_unary_ping_pong_%s' %
+ secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario('java_protobuf_unary_ping_pong_%s' %
+ secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_unary_qps_unconstrained_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS,
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'java_generic_async_streaming_qps_one_server_core_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async-limited',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ warmup_seconds=JAVA_WARMUP_SECONDS)
+
+        # TODO(jtattermusch): add Java vs. C++ scenarios
+
+ def __str__(self):
+ return 'java'
+
+
+class GoLanguage:
+
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_go.sh']
+
+ def worker_port_offset(self):
+ return 600
+
+ def scenarios(self):
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+
+ # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+        # but that's mostly due to the lack of a better name for the enum value.
+ yield _ping_pong_scenario('go_generic_sync_streaming_ping_pong_%s' %
+ secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_streaming_ping_pong_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure)
+
+ yield _ping_pong_scenario('go_protobuf_sync_unary_ping_pong_%s' %
+ secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr,
+ rpc_type='UNARY',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ yield _ping_pong_scenario(
+ 'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='SYNC_SERVER',
+ unconstrained_client='async',
+ secure=secure,
+ categories=[SCALABLE])
+
+ # unconstrained_client='async' is intended (client uses goroutines)
+ # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+            # but that's mostly due to the lack of a better name for the enum value.
+ yield _ping_pong_scenario(
+ 'go_generic_sync_streaming_qps_unconstrained_%s' % secstr,
+ rpc_type='STREAMING',
+ client_type='SYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ categories=[SCALABLE])
+
+            # TODO(jtattermusch): add Go vs. C++ scenarios
+
+ def __str__(self):
+ return 'go'
+
+
+class NodeLanguage:
+
+ def __init__(self, node_purejs=False):
+ self.node_purejs = node_purejs
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ fixture = 'native_js' if self.node_purejs else 'native_native'
+ return [
+ 'tools/run_tests/performance/run_worker_node.sh', fixture,
+ '--benchmark_impl=grpc'
+ ]
+
+ def worker_port_offset(self):
+ if self.node_purejs:
+ return 1100
+ return 1000
+
+ def scenarios(self):
+ node_implementation = 'node_purejs' if self.node_purejs else 'node'
+ for secure in [True, False]:
+ secstr = 'secure' if secure else 'insecure'
+ smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+
+ yield _ping_pong_scenario(
+ '%s_to_node_generic_async_streaming_ping_pong_%s' %
+ (node_implementation, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ server_language='node',
+ use_generic_payload=True,
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ '%s_to_node_protobuf_async_streaming_ping_pong_%s' %
+ (node_implementation, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='node',
+ async_server_threads=1,
+ secure=secure)
+
+ yield _ping_pong_scenario(
+ '%s_to_node_protobuf_async_unary_ping_pong_%s' %
+ (node_implementation, secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='node',
+ async_server_threads=1,
+ secure=secure,
+ categories=smoketest_categories)
+
+ yield _ping_pong_scenario(
+ '%s_to_node_protobuf_async_unary_qps_unconstrained_%s' %
+ (node_implementation, secstr),
+ rpc_type='UNARY',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='node',
+ unconstrained_client='async',
+ secure=secure,
+ categories=smoketest_categories + [SCALABLE])
+
+ yield _ping_pong_scenario(
+ '%s_to_node_protobuf_async_streaming_qps_unconstrained_%s' %
+ (node_implementation, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_SERVER',
+ server_language='node',
+ unconstrained_client='async',
+ secure=secure,
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ '%s_to_node_generic_async_streaming_qps_unconstrained_%s' %
+ (node_implementation, secstr),
+ rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT',
+ server_type='ASYNC_GENERIC_SERVER',
+ server_language='node',
+ unconstrained_client='async',
+ use_generic_payload=True,
+ secure=secure,
+ categories=[SCALABLE])
+
+            # TODO(murgatroid99): add Node vs. C++ scenarios
+
+ def __str__(self):
+ if self.node_purejs:
+ return 'node_purejs'
+ return 'node'
+
+
+class RustLanguage:
+
+ def __init__(self):
+ self.safename = str(self)
+
+ def worker_cmdline(self):
+ return ['tools/run_tests/performance/run_worker_rust.sh']
+
+ def worker_port_offset(self):
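+        # NOTE: this offset duplicates CSharpLanguage's (also 100); that is
+        # harmless only if Rust and C# workers are never run concurrently.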
+ return 100
+
+ def scenarios(self):
+ yield _ping_pong_scenario(
+ 'rust_generic_async_streaming_ping_pong', rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
+ use_generic_payload=True,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'rust_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario(
+ 'rust_protobuf_async_unary_ping_pong', rpc_type='UNARY',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'rust_protobuf_sync_to_async_unary_ping_pong', rpc_type='UNARY',
+ client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
+
+ yield _ping_pong_scenario(
+ 'rust_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+            categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'rust_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ unconstrained_client='async',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'rust_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
+ client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
+ server_language='c++', async_server_threads=1,
+ categories=[SMOKETEST, SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'rust_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ server_language='c++', async_server_threads=1)
+
+ yield _ping_pong_scenario(
+ 'rust_to_cpp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ unconstrained_client='async', server_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'rust_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained', rpc_type='UNARY',
+ client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
+ unconstrained_client='sync', server_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'cpp_to_rust_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+ unconstrained_client='async', client_language='c++',
+ categories=[SCALABLE])
+
+ yield _ping_pong_scenario(
+ 'rust_protobuf_async_unary_ping_pong_1MB', rpc_type='UNARY',
+ client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+            req_size=1024 * 1024, resp_size=1024 * 1024,
+            categories=[SMOKETEST, SCALABLE])
+
+ def __str__(self):
+ return 'rust'
+
+
+LANGUAGES = {
+ 'c++': CXXLanguage(),
+ 'csharp': CSharpLanguage(),
+ 'ruby': RubyLanguage(),
+ 'rust': RustLanguage(),
+ 'php7': Php7Language(),
+ 'php7_protobuf_c': Php7Language(php7_protobuf_c=True),
+ 'java': JavaLanguage(),
+ 'python': PythonLanguage(),
+ 'python_asyncio': PythonAsyncIOLanguage(),
+ 'go': GoLanguage(),
+ 'node': NodeLanguage(),
+ 'node_purejs': NodeLanguage(node_purejs=True)
+}
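+
+# A minimal sketch of how this registry is typically consumed (hypothetical
+# driver code; spawn_worker, run_scenario and BASE_PORT are illustrative
+# names only -- the actual driver is presumably run_performance_tests.py
+# elsewhere in this tree):
+#
+#   for name, lang in LANGUAGES.items():
+#       for scenario in lang.scenarios():
+#           worker = spawn_worker(lang.worker_cmdline(),
+#                                 port=BASE_PORT + lang.worker_port_offset())
+#           run_scenario(scenario, worker)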
diff --git a/grpc/tools/run_tests/performance/scenario_result_schema.json b/grpc/tools/run_tests/performance/scenario_result_schema.json
new file mode 100644
index 00000000..8b8ebb9b
--- /dev/null
+++ b/grpc/tools/run_tests/performance/scenario_result_schema.json
@@ -0,0 +1,1899 @@
+[
+ {
+ "fields": [
+ {
+ "mode": "NULLABLE",
+ "name": "buildNumber",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "buildUrl",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "jobName",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "gitCommit",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "gitActualCommit",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "created",
+ "type": "TIMESTAMP"
+ }
+ ],
+ "mode": "NULLABLE",
+ "name": "metadata",
+ "type": "RECORD"
+ },
+ {
+ "fields": [
+ {
+ "mode": "NULLABLE",
+ "name": "name",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "clientConfig",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "numClients",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "serverConfig",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "numServers",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "warmupSeconds",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "benchmarkSeconds",
+ "type": "INTEGER"
+ }
+ ],
+ "mode": "NULLABLE",
+ "name": "scenario",
+ "type": "RECORD"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "latencies",
+ "type": "STRING"
+ },
+ {
+ "fields": [
+ {
+ "mode": "NULLABLE",
+ "name": "latencies",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "timeElapsed",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "timeUser",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "timeSystem",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "cqPollCount",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_client_calls_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_calls_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cqs_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_client_channels_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_client_subchannels_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_channels_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_poll",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_wait",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kick",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kicked_without_poller",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kicked_again",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kick_wakeup_fd",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kick_wakeup_cv",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kick_own_thread",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_epoll_ctl",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_fd_cache_hits",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_histogram_slow_lookups",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_write",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_read",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_backup_pollers_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_backup_poller_polls",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_batches",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_cancel",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_send_initial_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_send_message",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_send_trailing_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_recv_initial_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_recv_message",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_recv_trailing_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_settings_writes",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_pings_sent",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_writes_begun",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_writes_offloaded",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_writes_continued",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_partial_writes",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_initial_write",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_start_new_stream",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_send_message",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_send_initial_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_send_trailing_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_retry_send_ping",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_continue_pings",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_goaway_sent",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_rst_stream",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_close_from_api",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_stream_flow_control",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_transport_flow_control",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_send_settings",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_bdp_estimator_ping",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_flow_control_unstalled_by_setting",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_flow_control_unstalled_by_update",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_application_ping",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_keepalive_ping",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_transport_flow_control_unstalled",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_ping_response",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_force_rst_stream",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_spurious_writes_begun",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_indexed",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_incidx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_incidx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_notidx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_notidx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_nvridx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_nvridx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_uncompressed",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_huffman",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_binary",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_binary_base64",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_indexed",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_incidx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_incidx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_notidx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_notidx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_nvridx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_nvridx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_uncompressed",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_huffman",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_binary",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_binary_base64",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_combiner_locks_initiated",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_combiner_locks_scheduled_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_combiner_locks_scheduled_final_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_combiner_locks_offloaded",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_locks_initiated",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_locks_scheduled_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_set_notify_on_cancel",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_cancelled",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_scheduled_short_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_scheduled_long_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_scheduled_to_self",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_wakeup_initiated",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_queue_drained",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_push_retries",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_requested_calls",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_slowpath_requests_queued",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cq_ev_queue_trylock_failures",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cq_ev_queue_trylock_successes",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cq_ev_queue_transient_pop_failures",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked_99p",
+ "type": "FLOAT"
+ }
+ ],
+ "mode": "REPEATED",
+ "name": "clientStats",
+ "type": "RECORD"
+ },
+ {
+ "fields": [
+ {
+ "mode": "NULLABLE",
+ "name": "timeElapsed",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "timeUser",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "timeSystem",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "cqPollCount",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_client_calls_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_calls_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cqs_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_client_channels_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_client_subchannels_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_channels_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_poll",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_wait",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kick",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kicked_without_poller",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kicked_again",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kick_wakeup_fd",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kick_wakeup_cv",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_kick_own_thread",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_epoll_ctl",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_pollset_fd_cache_hits",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_histogram_slow_lookups",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_write",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_syscall_read",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_backup_pollers_created",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_backup_poller_polls",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_batches",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_cancel",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_send_initial_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_send_message",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_send_trailing_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_recv_initial_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_recv_message",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_op_recv_trailing_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_settings_writes",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_pings_sent",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_writes_begun",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_writes_offloaded",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_writes_continued",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_partial_writes",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_initial_write",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_start_new_stream",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_send_message",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_send_initial_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_send_trailing_metadata",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_retry_send_ping",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_continue_pings",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_goaway_sent",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_rst_stream",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_close_from_api",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_stream_flow_control",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_transport_flow_control",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_send_settings",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_bdp_estimator_ping",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_flow_control_unstalled_by_setting",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_flow_control_unstalled_by_update",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_application_ping",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_keepalive_ping",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_transport_flow_control_unstalled",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_ping_response",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_initiate_write_due_to_force_rst_stream",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_spurious_writes_begun",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_indexed",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_incidx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_incidx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_notidx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_notidx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_nvridx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_lithdr_nvridx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_uncompressed",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_huffman",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_binary",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_recv_binary_base64",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_indexed",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_incidx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_incidx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_notidx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_notidx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_nvridx",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_lithdr_nvridx_v",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_uncompressed",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_huffman",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_binary",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_hpack_send_binary_base64",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_combiner_locks_initiated",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_combiner_locks_scheduled_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_combiner_locks_scheduled_final_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_combiner_locks_offloaded",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_locks_initiated",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_locks_scheduled_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_set_notify_on_cancel",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_combiner_cancelled",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_scheduled_short_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_scheduled_long_items",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_scheduled_to_self",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_wakeup_initiated",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_queue_drained",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_executor_push_retries",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_requested_calls",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_slowpath_requests_queued",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cq_ev_queue_trylock_failures",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cq_ev_queue_trylock_successes",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_cq_ev_queue_transient_pop_failures",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_call_initial_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_poll_events_returned_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_write_iov_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_tcp_read_offer_iov_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_size_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_initial_metadata_per_write_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_message_per_write_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_trailing_metadata_per_write_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_http2_send_flowctl_per_write_99p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked_bkts",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked_50p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked_95p",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "core_server_cqs_checked_99p",
+ "type": "FLOAT"
+ }
+ ],
+ "mode": "REPEATED",
+ "name": "serverStats",
+ "type": "RECORD"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "serverCores",
+ "type": "STRING"
+ },
+ {
+ "fields": [
+ {
+ "mode": "NULLABLE",
+ "name": "qps",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "qpsPerServerCore",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "serverSystemTime",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "serverUserTime",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "clientSystemTime",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "clientUserTime",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "latency50",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "latency90",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "latency95",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "latency99",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "latency999",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "clientPollsPerRequest",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "serverPollsPerRequest",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "serverQueriesPerCpuSec",
+ "type": "FLOAT"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "clientQueriesPerCpuSec",
+ "type": "FLOAT"
+ }
+ ],
+ "mode": "NULLABLE",
+ "name": "summary",
+ "type": "RECORD"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "clientSuccess",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "serverSuccess",
+ "type": "STRING"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "requestResults",
+ "type": "STRING"
+ },
+ {
+ "fields": [
+ {
+ "mode": "NULLABLE",
+ "name": "totalCpuTime",
+ "type": "INTEGER"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "idleCpuTime",
+ "type": "INTEGER"
+ }
+ ],
+ "mode": "REPEATED",
+ "name": "serverCpuStats",
+ "type": "RECORD"
+ },
+ {
+ "mode": "NULLABLE",
+ "name": "serverCpuUsage",
+ "type": "FLOAT"
+ }
+]
\ No newline at end of file
diff --git a/grpc/tools/run_tests/python_utils/__init__.py b/grpc/tools/run_tests/python_utils/__init__.py
new file mode 100644
index 00000000..5772620b
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/grpc/tools/run_tests/python_utils/antagonist.py b/grpc/tools/run_tests/python_utils/antagonist.py
new file mode 100755
index 00000000..a928a4cb
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/antagonist.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""This is used by run_tests.py to create cpu load on a machine"""
+
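+# Busy-wait forever to generate load; the parent run_tests.py process is
+# expected to kill this antagonist once the load is no longer needed.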
+while True:
+ pass
diff --git a/grpc/tools/run_tests/python_utils/check_on_pr.py b/grpc/tools/run_tests/python_utils/check_on_pr.py
new file mode 100644
index 00000000..6516e9dd
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/check_on_pr.py
@@ -0,0 +1,140 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import os
+import sys
+import json
+import time
+import datetime
+import traceback
+
+import requests
+import jwt
+
+_GITHUB_API_PREFIX = 'https://api.github.com'
+_GITHUB_REPO = 'grpc/grpc'
+_GITHUB_APP_ID = 22338
+_INSTALLATION_ID = 519109
+
+_ACCESS_TOKEN_CACHE = None
+_ACCESS_TOKEN_FETCH_RETRIES = 6
+_ACCESS_TOKEN_FETCH_RETRIES_INTERVAL_S = 15
+
+
+def _jwt_token():
+ github_app_key = open(
+ os.path.join(os.environ['KOKORO_KEYSTORE_DIR'],
+ '73836_grpc_checks_private_key'), 'rb').read()
+ return jwt.encode(
+ {
+ 'iat': int(time.time()),
+ 'exp': int(time.time() + 60 * 10), # expire in 10 minutes
+ 'iss': _GITHUB_APP_ID,
+ },
+ github_app_key,
+ algorithm='RS256')
+
+
+def _access_token():
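+    # The installation token is cached for 60 seconds; when it expires, mint a
+    # fresh JWT and exchange it for a new installation token, retrying a few
+    # times before giving up.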
+ global _ACCESS_TOKEN_CACHE
+    if _ACCESS_TOKEN_CACHE is None or _ACCESS_TOKEN_CACHE['exp'] < time.time():
+ for i in range(_ACCESS_TOKEN_FETCH_RETRIES):
+ resp = requests.post(
+ url='https://api.github.com/app/installations/%s/access_tokens'
+ % _INSTALLATION_ID,
+ headers={
+ 'Authorization': 'Bearer %s' % _jwt_token().decode('ASCII'),
+ 'Accept': 'application/vnd.github.machine-man-preview+json',
+ })
+
+ try:
+ _ACCESS_TOKEN_CACHE = {
+ 'token': resp.json()['token'],
+ 'exp': time.time() + 60
+ }
+ break
+ except (KeyError, ValueError) as e:
+                traceback.print_exc()
+                print('HTTP Status %d %s' % (resp.status_code, resp.reason))
+                print("Fetching access token from GitHub API failed:")
+ print(resp.text)
+ if i != _ACCESS_TOKEN_FETCH_RETRIES - 1:
+                    print('Retrying after %.2f seconds.' %
+ _ACCESS_TOKEN_FETCH_RETRIES_INTERVAL_S)
+ time.sleep(_ACCESS_TOKEN_FETCH_RETRIES_INTERVAL_S)
+ else:
+ print("error: Unable to fetch access token, exiting...")
+ sys.exit(0)
+
+ return _ACCESS_TOKEN_CACHE['token']
+
+
+def _call(url, method='GET', json=None):
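+    # Accept either an absolute URL or a path relative to the GitHub API root.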
+ if not url.startswith('https://'):
+ url = _GITHUB_API_PREFIX + url
+ headers = {
+ 'Authorization': 'Bearer %s' % _access_token(),
+ 'Accept': 'application/vnd.github.antiope-preview+json',
+ }
+ return requests.request(method=method, url=url, headers=headers, json=json)
+
+
+def _latest_commit():
+ resp = _call(
+ '/repos/%s/pulls/%s/commits' %
+ (_GITHUB_REPO, os.environ['KOKORO_GITHUB_PULL_REQUEST_NUMBER']))
+ return resp.json()[-1]
+
+
+def check_on_pr(name, summary, success=True):
+ """Create/Update a check on current pull request.
+
+    The check runs are aggregated by name, so a newer check will update an
+    older check with the same name.
+
+ Requires environment variable 'KOKORO_GITHUB_PULL_REQUEST_NUMBER' to indicate which pull request
+ should be updated.
+
+ Args:
+ name: The name of the check.
+ summary: A str in Markdown to be used as the detail information of the check.
+        success: A bool indicating whether the check succeeded.
+ """
+ if 'KOKORO_GIT_COMMIT' not in os.environ:
+ print('Missing KOKORO_GIT_COMMIT env var: not checking')
+ return
+ if 'KOKORO_KEYSTORE_DIR' not in os.environ:
+ print('Missing KOKORO_KEYSTORE_DIR env var: not checking')
+ return
+ if 'KOKORO_GITHUB_PULL_REQUEST_NUMBER' not in os.environ:
+ print('Missing KOKORO_GITHUB_PULL_REQUEST_NUMBER env var: not checking')
+ return
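+    # The GitHub Checks API expects completed_at as an ISO-8601 UTC timestamp
+    # with a trailing 'Z'.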
+ completion_time = str(
+ datetime.datetime.utcnow().replace(microsecond=0).isoformat()) + 'Z'
+ resp = _call('/repos/%s/check-runs' % _GITHUB_REPO,
+ method='POST',
+ json={
+ 'name': name,
+ 'head_sha': os.environ['KOKORO_GIT_COMMIT'],
+ 'status': 'completed',
+ 'completed_at': completion_time,
+ 'conclusion': 'success' if success else 'failure',
+ 'output': {
+ 'title': name,
+ 'summary': summary,
+ }
+ })
+ print('Result of Creating/Updating Check on PR:',
+ json.dumps(resp.json(), indent=2))
diff --git a/grpc/tools/run_tests/python_utils/dockerjob.py b/grpc/tools/run_tests/python_utils/dockerjob.py
new file mode 100755
index 00000000..206a6cd3
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/dockerjob.py
@@ -0,0 +1,156 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Helpers to run docker instances as jobs."""
+
+from __future__ import print_function
+
+import tempfile
+import time
+import uuid
+import os
+import subprocess
+import json
+
+import jobset
+
+_DEVNULL = open(os.devnull, 'w')
+
+
+def random_name(base_name):
+ """Randomizes given base name."""
+ return '%s_%s' % (base_name, uuid.uuid4())
+
+
+def docker_kill(cid):
+ """Kills a docker container. Returns True if successful."""
+ return subprocess.call(['docker', 'kill', str(cid)],
+ stdin=subprocess.PIPE,
+ stdout=_DEVNULL,
+ stderr=subprocess.STDOUT) == 0
+
+
+def docker_mapped_port(cid, port, timeout_seconds=15):
+ """Get port mapped to internal given internal port for given container."""
+ started = time.time()
+ while time.time() - started < timeout_seconds:
+ try:
+ output = subprocess.check_output('docker port %s %s' % (cid, port),
+ stderr=_DEVNULL,
+ shell=True)
+ return int(output.split(':', 2)[1])
+ except subprocess.CalledProcessError as e:
+ pass
+ raise Exception('Failed to get exposed port %s for container %s.' %
+ (port, cid))
+
+
+def docker_ip_address(cid, timeout_seconds=15):
+ """Get port mapped to internal given internal port for given container."""
+ started = time.time()
+ while time.time() - started < timeout_seconds:
+ cmd = 'docker inspect %s' % cid
+ try:
+ output = subprocess.check_output(cmd, stderr=_DEVNULL, shell=True)
+ json_info = json.loads(output)
+ assert len(json_info) == 1
+ out = json_info[0]['NetworkSettings']['IPAddress']
+ if not out:
+ continue
+ return out
+ except subprocess.CalledProcessError as e:
+ pass
+ raise Exception(
+ 'Non-retryable error: Failed to get ip address of container %s.' % cid)
+
+
+def wait_for_healthy(cid, shortname, timeout_seconds):
+ """Wait timeout_seconds for the container to become healthy"""
+ started = time.time()
+ while time.time() - started < timeout_seconds:
+ try:
+ output = subprocess.check_output([
+ 'docker', 'inspect', '--format="{{.State.Health.Status}}"', cid
+ ],
+ stderr=_DEVNULL)
+ if output.strip('\n') == 'healthy':
+ return
+ except subprocess.CalledProcessError as e:
+ pass
+ time.sleep(1)
+ raise Exception('Timed out waiting for %s (%s) to pass health check' %
+ (shortname, cid))
+
+
+def finish_jobs(jobs, suppress_failure=True):
+ """Kills given docker containers and waits for corresponding jobs to finish"""
+ for job in jobs:
+ job.kill(suppress_failure=suppress_failure)
+
+ while any(job.is_running() for job in jobs):
+ time.sleep(1)
+
+
+def image_exists(image):
+ """Returns True if given docker image exists."""
+ return subprocess.call(['docker', 'inspect', image],
+ stdin=subprocess.PIPE,
+ stdout=_DEVNULL,
+ stderr=subprocess.STDOUT) == 0
+
+
+def remove_image(image, skip_nonexistent=False, max_retries=10):
+ """Attempts to remove docker image with retries."""
+ if skip_nonexistent and not image_exists(image):
+ return True
+ for attempt in range(0, max_retries):
+ if subprocess.call(['docker', 'rmi', '-f', image],
+ stdin=subprocess.PIPE,
+ stdout=_DEVNULL,
+ stderr=subprocess.STDOUT) == 0:
+ return True
+ time.sleep(2)
+ print('Failed to remove docker image %s' % image)
+ return False
+
+
+class DockerJob:
+ """Encapsulates a job"""
+
+ def __init__(self, spec):
+ self._spec = spec
+ self._job = jobset.Job(spec,
+ newline_on_success=True,
+ travis=True,
+ add_env={})
+ self._container_name = spec.container_name
+
+ def mapped_port(self, port):
+ return docker_mapped_port(self._container_name, port)
+
+ def ip_address(self):
+ return docker_ip_address(self._container_name)
+
+ def wait_for_healthy(self, timeout_seconds):
+ wait_for_healthy(self._container_name, self._spec.shortname,
+ timeout_seconds)
+
+ def kill(self, suppress_failure=False):
+ """Sends kill signal to the container."""
+ if suppress_failure:
+ self._job.suppress_failure_message()
+ return docker_kill(self._container_name)
+
+ def is_running(self):
+ """Polls a job and returns True if given job is still running."""
+ return self._job.state() == jobset._RUNNING
diff --git a/grpc/tools/run_tests/python_utils/filter_pull_request_tests.py b/grpc/tools/run_tests/python_utils/filter_pull_request_tests.py
new file mode 100644
index 00000000..98a17fd1
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/filter_pull_request_tests.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Filter out tests based on file differences compared to merge target branch"""
+
+from __future__ import print_function
+
+import re
+import six
+from subprocess import check_output
+
+
+class TestSuite:
+ """
+    Contains labels that identify jobs as belonging to this test suite and
+    triggers that determine whether changed files are relevant to it
+ """
+
+ def __init__(self, labels):
+ """
+ Build TestSuite to group tests based on labeling
+        :param labels: strings that should match a job's platform, config, language, or test group
+ """
+ self.triggers = []
+ self.labels = labels
+
+ def add_trigger(self, trigger):
+ """
+ Add a regex to list of triggers that determine if a changed file should run tests
+ :param trigger: regex matching file relevant to tests
+ """
+ self.triggers.append(trigger)
+
+
+# Create test suites
+_CORE_TEST_SUITE = TestSuite(['c'])
+_CPP_TEST_SUITE = TestSuite(['c++'])
+_CSHARP_TEST_SUITE = TestSuite(['csharp'])
+_NODE_TEST_SUITE = TestSuite(['grpc-node'])
+_OBJC_TEST_SUITE = TestSuite(['objc'])
+_PHP_TEST_SUITE = TestSuite(['php', 'php7'])
+_PYTHON_TEST_SUITE = TestSuite(['python'])
+_RUBY_TEST_SUITE = TestSuite(['ruby'])
+_LINUX_TEST_SUITE = TestSuite(['linux'])
+_WINDOWS_TEST_SUITE = TestSuite(['windows'])
+_MACOS_TEST_SUITE = TestSuite(['macos'])
+_ALL_TEST_SUITES = [
+ _CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE, _NODE_TEST_SUITE,
+ _OBJC_TEST_SUITE, _PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE,
+ _LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE
+]
+
+# Dictionary of whitelistable files where the key is a regex matching changed files
+# and the value is a list of tests that should be run. An empty list means that
+# the changed files should not trigger any tests. Any changed file that does not
+# match any of these regexes will trigger all tests
+# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
+_WHITELIST_DICT = {
+ '^doc/': [],
+ '^examples/': [],
+ '^include/grpc\+\+/': [_CPP_TEST_SUITE],
+ '^summerofcode/': [],
+ '^src/cpp/': [_CPP_TEST_SUITE],
+ '^src/csharp/': [_CSHARP_TEST_SUITE],
+ '^src/objective\-c/': [_OBJC_TEST_SUITE],
+ '^src/php/': [_PHP_TEST_SUITE],
+ '^src/python/': [_PYTHON_TEST_SUITE],
+ '^src/ruby/': [_RUBY_TEST_SUITE],
+ '^templates/': [],
+ '^test/core/': [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
+ '^test/cpp/': [_CPP_TEST_SUITE],
+ '^test/distrib/cpp/': [_CPP_TEST_SUITE],
+ '^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
+ '^test/distrib/php/': [_PHP_TEST_SUITE],
+ '^test/distrib/python/': [_PYTHON_TEST_SUITE],
+ '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
+ '^vsprojects/': [_WINDOWS_TEST_SUITE],
+ 'composer\.json$': [_PHP_TEST_SUITE],
+ 'config\.m4$': [_PHP_TEST_SUITE],
+ 'CONTRIBUTING\.md$': [],
+ 'Gemfile$': [_RUBY_TEST_SUITE],
+ 'grpc\.def$': [_WINDOWS_TEST_SUITE],
+ 'grpc\.gemspec$': [_RUBY_TEST_SUITE],
+ 'gRPC\.podspec$': [_OBJC_TEST_SUITE],
+ 'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
+ 'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
+ 'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
+ 'BUILDING\.md$': [],
+ 'LICENSE$': [],
+ 'MANIFEST\.md$': [],
+ 'package\.json$': [_PHP_TEST_SUITE],
+ 'package\.xml$': [_PHP_TEST_SUITE],
+ 'PATENTS$': [],
+ 'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
+ 'README\.md$': [],
+ 'requirements\.txt$': [_PYTHON_TEST_SUITE],
+ 'setup\.cfg$': [_PYTHON_TEST_SUITE],
+ 'setup\.py$': [_PYTHON_TEST_SUITE]
+}
+
+# Regex that combines all keys in _WHITELIST_DICT
+_ALL_TRIGGERS = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")"
+
+# Add all triggers to their respective test suites
+for trigger, test_suites in six.iteritems(_WHITELIST_DICT):
+ for test_suite in test_suites:
+ test_suite.add_trigger(trigger)
+
+
+def _get_changed_files(base_branch):
+ """
+ Get list of changed files between current branch and base of target merge branch
+ """
+ # Get file changes between branch and merge-base of specified branch
+ # Not combined to be Windows friendly
+ base_commit = check_output(["git", "merge-base", base_branch,
+ "HEAD"]).rstrip()
+ return check_output(["git", "diff", base_commit, "--name-only",
+ "HEAD"]).splitlines()
+
+
+def _can_skip_tests(file_names, triggers):
+ """
+    Determines whether tests are skippable because none of the changed files match the regexes
+    :param file_names: list of changed files generated by _get_changed_files()
+    :param triggers: list of regexes matching file names that indicate tests should be run
+    :return: True if it is safe to skip the tests
+ """
+ for file_name in file_names:
+ if any(re.match(trigger, file_name) for trigger in triggers):
+ return False
+ return True
+
+
+def _remove_irrelevant_tests(tests, skippable_labels):
+ """
+ Filters out tests by config or language - will not remove sanitizer tests
+ :param tests: list of all tests generated by run_tests_matrix.py
+ :param skippable_labels: list of languages and platforms with skippable tests
+ :return: list of relevant tests
+ """
+ # test.labels[0] is platform and test.labels[2] is language
+ # We skip a test if both are considered safe to skip
+ return [test for test in tests if test.labels[0] not in skippable_labels or \
+ test.labels[2] not in skippable_labels]
+
+
+def affects_c_cpp(base_branch):
+ """
+ Determines if a pull request's changes affect C/C++. This function exists because
+ there are pull request tests that only test C/C++ code
+ :param base_branch: branch that a pull request is requesting to merge into
+ :return: boolean indicating whether C/C++ changes are made in pull request
+ """
+ changed_files = _get_changed_files(base_branch)
+ # Run all tests if any changed file is not in the whitelist dictionary
+ for changed_file in changed_files:
+ if not re.match(_ALL_TRIGGERS, changed_file):
+ return True
+ return not _can_skip_tests(
+ changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
+
+
+def filter_tests(tests, base_branch):
+ """
+ Filters out tests that are safe to ignore
+    :param tests: list of all tests generated by run_tests_matrix.py
+    :param base_branch: branch that a pull request is requesting to merge into
+    :return: list of relevant tests
+ """
+ print(
+ 'Finding file differences between gRPC %s branch and pull request...\n'
+ % base_branch)
+ changed_files = _get_changed_files(base_branch)
+ for changed_file in changed_files:
+ print(' %s' % changed_file)
+ print('')
+
+ # Run all tests if any changed file is not in the whitelist dictionary
+ for changed_file in changed_files:
+ if not re.match(_ALL_TRIGGERS, changed_file):
+            return tests
+ # Figure out which language and platform tests to run
+ skippable_labels = []
+ for test_suite in _ALL_TEST_SUITES:
+ if _can_skip_tests(changed_files, test_suite.triggers):
+ for label in test_suite.labels:
+ print(' %s tests safe to skip' % label)
+ skippable_labels.append(label)
+ tests = _remove_irrelevant_tests(tests, skippable_labels)
+ return tests
diff --git a/grpc/tools/run_tests/python_utils/jobset.py b/grpc/tools/run_tests/python_utils/jobset.py
new file mode 100755
index 00000000..d9ae0620
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/jobset.py
@@ -0,0 +1,583 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Run a group of subprocesses and then finish."""
+
+import logging
+import multiprocessing
+import os
+import platform
+import re
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+import errno
+
+# cpu cost measurement
+measure_cpu_costs = False
+
+_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
+# Maximum number of bytes of job's stdout that will be stored in the result.
+# Only the last N bytes of stdout will be kept if the actual output is longer.
+_MAX_RESULT_SIZE = 64 * 1024
+
+
+# NOTE: If you change this, please make sure to test reviewing the
+# github PR with http://reviewable.io, which is known to add UTF-8
+# characters to the PR description, which leak into the environment here
+# and cause failures.
+def strip_non_ascii_chars(s):
+ return ''.join(c for c in s if ord(c) < 128)
+
+
+def sanitized_environment(env):
+ sanitized = {}
+ for key, value in env.items():
+ sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
+ return sanitized
+
+
+def platform_string():
+ if platform.system() == 'Windows':
+ return 'windows'
+ elif platform.system()[:7] == 'MSYS_NT':
+ return 'windows'
+ elif platform.system() == 'Darwin':
+ return 'mac'
+ elif platform.system() == 'Linux':
+ return 'linux'
+ else:
+ return 'posix'
+
+
+# setup a signal handler so that signal.pause registers 'something'
+# when a child finishes
+# not using futures and threading to avoid a dependency on subprocess32
+if platform_string() == 'windows':
+ pass
+else:
+
+ def alarm_handler(unused_signum, unused_frame):
+ pass
+
+ signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
+ signal.signal(signal.SIGALRM, alarm_handler)
+
+_SUCCESS = object()
+_FAILURE = object()
+_RUNNING = object()
+_KILLED = object()
+
+_COLORS = {
+ 'red': [31, 0],
+ 'green': [32, 0],
+ 'yellow': [33, 0],
+ 'lightgray': [37, 0],
+ 'gray': [30, 1],
+ 'purple': [35, 0],
+ 'cyan': [36, 0]
+}
+
+_BEGINNING_OF_LINE = '\x1b[0G'
+_CLEAR_LINE = '\x1b[2K'
+
+_TAG_COLOR = {
+ 'FAILED': 'red',
+ 'FLAKE': 'purple',
+ 'TIMEOUT_FLAKE': 'purple',
+ 'WARNING': 'yellow',
+ 'TIMEOUT': 'red',
+ 'PASSED': 'green',
+ 'START': 'gray',
+ 'WAITING': 'yellow',
+ 'SUCCESS': 'green',
+ 'IDLE': 'gray',
+ 'SKIPPED': 'cyan'
+}
+
+_FORMAT = '%(asctime)-15s %(message)s'
+logging.basicConfig(level=logging.INFO, format=_FORMAT)
+
+
+def eintr_be_gone(fn):
+ """Run fn until it doesn't stop because of EINTR"""
+ while True:
+ try:
+ return fn()
+ except IOError as e:
+ if e.errno != errno.EINTR:
+ raise
+
+
+def message(tag, msg, explanatory_text=None, do_newline=False):
+ if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
+ return
+ message.old_tag = tag
+ message.old_msg = msg
+ while True:
+ try:
+ if platform_string() == 'windows' or not sys.stdout.isatty():
+ if explanatory_text:
+ logging.info(explanatory_text)
+ logging.info('%s: %s', tag, msg)
+ else:
+ sys.stdout.write(
+ '%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' %
+ (_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' %
+ explanatory_text if explanatory_text is not None else '',
+ _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
+ tag, msg, '\n'
+ if do_newline or explanatory_text is not None else ''))
+ sys.stdout.flush()
+ return
+ except IOError as e:
+ if e.errno != errno.EINTR:
+ raise
+
+
+message.old_tag = ''
+message.old_msg = ''
+
+
+def which(filename):
+ if '/' in filename:
+ return filename
+ for path in os.environ['PATH'].split(os.pathsep):
+ if os.path.exists(os.path.join(path, filename)):
+ return os.path.join(path, filename)
+ raise Exception('%s not found' % filename)
+
+
+class JobSpec(object):
+ """Specifies what to run for a job."""
+
+ def __init__(self,
+ cmdline,
+ shortname=None,
+ environ=None,
+ cwd=None,
+ shell=False,
+ timeout_seconds=5 * 60,
+ flake_retries=0,
+ timeout_retries=0,
+ kill_handler=None,
+ cpu_cost=1.0,
+ verbose_success=False,
+ logfilename=None):
+ """
+ Arguments:
+ cmdline: a list of arguments to pass as the command line
+ environ: a dictionary of environment variables to set in the child process
+ kill_handler: a handler that will be called whenever job.kill() is invoked
+ cpu_cost: number of cores per second this job needs
+ logfilename: use given file to store job's output, rather than using a temporary file
+ """
+ if environ is None:
+ environ = {}
+ self.cmdline = cmdline
+ self.environ = environ
+ self.shortname = cmdline[0] if shortname is None else shortname
+ self.cwd = cwd
+ self.shell = shell
+ self.timeout_seconds = timeout_seconds
+ self.flake_retries = flake_retries
+ self.timeout_retries = timeout_retries
+ self.kill_handler = kill_handler
+ self.cpu_cost = cpu_cost
+ self.verbose_success = verbose_success
+ self.logfilename = logfilename
+        if self.logfilename and (self.flake_retries != 0 or
+                                 self.timeout_retries != 0):
+ # Forbidden to avoid overwriting the test log when retrying.
+ raise Exception(
+ 'Cannot use custom logfile when retries are enabled')
+
+ def identity(self):
+ return '%r %r' % (self.cmdline, self.environ)
+
+ def __hash__(self):
+ return hash(self.identity())
+
+ def __cmp__(self, other):
+ return self.identity() == other.identity()
+
+ def __repr__(self):
+ return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
+ self.cmdline)
+
+ def __str__(self):
+ return '%s: %s %s' % (self.shortname, ' '.join(
+ '%s=%s' % kv for kv in self.environ.items()), ' '.join(
+ self.cmdline))
+
+
+class JobResult(object):
+
+ def __init__(self):
+ self.state = 'UNKNOWN'
+ self.returncode = -1
+ self.elapsed_time = 0
+ self.num_failures = 0
+ self.retries = 0
+ self.message = ''
+ self.cpu_estimated = 1
+ self.cpu_measured = 1
+
+
+def read_from_start(f):
+ f.seek(0)
+ return f.read()
+
+
+class Job(object):
+ """Manages one job."""
+
+ def __init__(self,
+ spec,
+ newline_on_success,
+ travis,
+ add_env,
+ quiet_success=False):
+ self._spec = spec
+ self._newline_on_success = newline_on_success
+ self._travis = travis
+ self._add_env = add_env.copy()
+ self._retries = 0
+ self._timeout_retries = 0
+ self._suppress_failure_message = False
+ self._quiet_success = quiet_success
+ if not self._quiet_success:
+ message('START', spec.shortname, do_newline=self._travis)
+ self.result = JobResult()
+ self.start()
+
+ def GetSpec(self):
+ return self._spec
+
+ def start(self):
+ if self._spec.logfilename:
+ # make sure the log directory exists
+ logfile_dir = os.path.dirname(
+ os.path.abspath(self._spec.logfilename))
+ if not os.path.exists(logfile_dir):
+ os.makedirs(logfile_dir)
+ self._logfile = open(self._spec.logfilename, 'w+')
+ else:
+ self._logfile = tempfile.TemporaryFile()
+ env = dict(os.environ)
+ env.update(self._spec.environ)
+ env.update(self._add_env)
+ env = sanitized_environment(env)
+ self._start = time.time()
+ cmdline = self._spec.cmdline
+ # The Unix time command is finicky when used with MSBuild, so we don't use it
+ # with jobs that run MSBuild.
+ global measure_cpu_costs
+        if measure_cpu_costs and 'vsprojects\\build' not in cmdline[0]:
+ cmdline = ['time', '-p'] + cmdline
+ else:
+ measure_cpu_costs = False
+ try_start = lambda: subprocess.Popen(args=cmdline,
+ stderr=subprocess.STDOUT,
+ stdout=self._logfile,
+ cwd=self._spec.cwd,
+ shell=self._spec.shell,
+ env=env)
+ delay = 0.3
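+        # Starting the subprocess can fail transiently, so retry with
+        # exponential backoff before one final unguarded attempt.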
+ for i in range(0, 4):
+ try:
+ self._process = try_start()
+ break
+ except OSError:
+ message(
+ 'WARNING', 'Failed to start %s, retrying in %f seconds' %
+ (self._spec.shortname, delay))
+ time.sleep(delay)
+ delay *= 2
+ else:
+ self._process = try_start()
+ self._state = _RUNNING
+
+ def state(self):
+ """Poll current state of the job. Prints messages at completion."""
+
+ def stdout(self=self):
+ stdout = read_from_start(self._logfile)
+ self.result.message = stdout[-_MAX_RESULT_SIZE:]
+ return stdout
+
+ if self._state == _RUNNING and self._process.poll() is not None:
+ elapsed = time.time() - self._start
+ self.result.elapsed_time = elapsed
+ if self._process.returncode != 0:
+ if self._retries < self._spec.flake_retries:
+ message('FLAKE',
+ '%s [ret=%d, pid=%d]' %
+ (self._spec.shortname, self._process.returncode,
+ self._process.pid),
+ stdout(),
+ do_newline=True)
+ self._retries += 1
+ self.result.num_failures += 1
+ self.result.retries = self._timeout_retries + self._retries
+ # NOTE: job is restarted regardless of jobset's max_time setting
+ self.start()
+ else:
+ self._state = _FAILURE
+ if not self._suppress_failure_message:
+ message('FAILED',
+ '%s [ret=%d, pid=%d, time=%.1fsec]' %
+ (self._spec.shortname, self._process.returncode,
+ self._process.pid, elapsed),
+ stdout(),
+ do_newline=True)
+ self.result.state = 'FAILED'
+ self.result.num_failures += 1
+ self.result.returncode = self._process.returncode
+ else:
+ self._state = _SUCCESS
+ measurement = ''
+ if measure_cpu_costs:
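+                    # Parse the `time -p` output (real/user/sys) from the
+                    # captured stdout to estimate how many cores the job
+                    # actually used.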
+ m = re.search(
+ r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
+ stdout())
+ real = float(m.group(1))
+ user = float(m.group(2))
+ sys = float(m.group(3))
+ if real > 0.5:
+ cores = (user + sys) / real
+ self.result.cpu_measured = float('%.01f' % cores)
+ self.result.cpu_estimated = float('%.01f' %
+ self._spec.cpu_cost)
+ measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
+ self.result.cpu_measured, self.result.cpu_estimated)
+ if not self._quiet_success:
+ message('PASSED',
+ '%s [time=%.1fsec, retries=%d:%d%s]' %
+ (self._spec.shortname, elapsed, self._retries,
+ self._timeout_retries, measurement),
+ stdout() if self._spec.verbose_success else None,
+ do_newline=self._newline_on_success or self._travis)
+ self.result.state = 'PASSED'
+ elif (self._state == _RUNNING and
+ self._spec.timeout_seconds is not None and
+ time.time() - self._start > self._spec.timeout_seconds):
+ elapsed = time.time() - self._start
+ self.result.elapsed_time = elapsed
+ if self._timeout_retries < self._spec.timeout_retries:
+ message('TIMEOUT_FLAKE',
+ '%s [pid=%d]' %
+ (self._spec.shortname, self._process.pid),
+ stdout(),
+ do_newline=True)
+ self._timeout_retries += 1
+ self.result.num_failures += 1
+ self.result.retries = self._timeout_retries + self._retries
+ if self._spec.kill_handler:
+ self._spec.kill_handler(self)
+ self._process.terminate()
+ # NOTE: job is restarted regardless of jobset's max_time setting
+ self.start()
+ else:
+ message('TIMEOUT',
+ '%s [pid=%d, time=%.1fsec]' %
+ (self._spec.shortname, self._process.pid, elapsed),
+ stdout(),
+ do_newline=True)
+ self.kill()
+ self.result.state = 'TIMEOUT'
+ self.result.num_failures += 1
+ return self._state
+
+ def kill(self):
+ if self._state == _RUNNING:
+ self._state = _KILLED
+ if self._spec.kill_handler:
+ self._spec.kill_handler(self)
+ self._process.terminate()
+
+ def suppress_failure_message(self):
+ self._suppress_failure_message = True
+
+
+class Jobset(object):
+ """Manages one run of jobs."""
+
+ def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
+ newline_on_success, travis, stop_on_failure, add_env,
+ quiet_success, max_time):
+ self._running = set()
+ self._check_cancelled = check_cancelled
+ self._cancelled = False
+ self._failures = 0
+ self._completed = 0
+ self._maxjobs = maxjobs
+ self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
+ self._newline_on_success = newline_on_success
+ self._travis = travis
+ self._stop_on_failure = stop_on_failure
+ self._add_env = add_env
+ self._quiet_success = quiet_success
+ self._max_time = max_time
+ self.resultset = {}
+ self._remaining = None
+ self._start_time = time.time()
+
+ def set_remaining(self, remaining):
+ self._remaining = remaining
+
+ def get_num_failures(self):
+ return self._failures
+
+ def cpu_cost(self):
+ c = 0
+ for job in self._running:
+ c += job._spec.cpu_cost
+ return c
+
+ def start(self, spec):
+ """Start a job. Return True on success, False on failure."""
+ while True:
+ if self._max_time > 0 and time.time(
+ ) - self._start_time > self._max_time:
+ skipped_job_result = JobResult()
+ skipped_job_result.state = 'SKIPPED'
+ message('SKIPPED', spec.shortname, do_newline=True)
+ self.resultset[spec.shortname] = [skipped_job_result]
+ return True
+ if self.cancelled(): return False
+ current_cpu_cost = self.cpu_cost()
+ if current_cpu_cost == 0: break
+ if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
+ if len(self._running) < self._maxjobs_cpu_agnostic:
+ break
+ self.reap(spec.shortname, spec.cpu_cost)
+ if self.cancelled(): return False
+ job = Job(spec, self._newline_on_success, self._travis, self._add_env,
+ self._quiet_success)
+ self._running.add(job)
+ if job.GetSpec().shortname not in self.resultset:
+ self.resultset[job.GetSpec().shortname] = []
+ return True
+
+ def reap(self, waiting_for=None, waiting_for_cost=None):
+ """Collect the dead jobs."""
+ while self._running:
+ dead = set()
+ for job in self._running:
+ st = eintr_be_gone(lambda: job.state())
+ if st == _RUNNING: continue
+ if st == _FAILURE or st == _KILLED:
+ self._failures += 1
+ if self._stop_on_failure:
+ self._cancelled = True
+ for job in self._running:
+ job.kill()
+ dead.add(job)
+ break
+ for job in dead:
+ self._completed += 1
+ if not self._quiet_success or job.result.state != 'PASSED':
+ self.resultset[job.GetSpec().shortname].append(job.result)
+ self._running.remove(job)
+ if dead: return
+ if not self._travis and platform_string() != 'windows':
+ rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
+ if self._remaining is not None and self._completed > 0:
+ now = time.time()
+ sofar = now - self._start_time
+ remaining = sofar / self._completed * (self._remaining +
+ len(self._running))
+ rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
+ if waiting_for is not None:
+ wstr = ' next: %s @ %.2f cpu' % (waiting_for,
+ waiting_for_cost)
+ else:
+ wstr = ''
+ message(
+ 'WAITING',
+ '%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
+ (rstr, len(self._running), self._completed, self._failures,
+ self.cpu_cost(), wstr))
+ if platform_string() == 'windows':
+ time.sleep(0.1)
+ else:
+ signal.alarm(10)
+ signal.pause()
+
+ def cancelled(self):
+ """Poll for cancellation."""
+ if self._cancelled: return True
+ if not self._check_cancelled(): return False
+ for job in self._running:
+ job.kill()
+ self._cancelled = True
+ return True
+
+ def finish(self):
+ while self._running:
+ if self.cancelled(): pass # poll cancellation
+ self.reap()
+ if platform_string() != 'windows':
+ signal.alarm(0)
+ return not self.cancelled() and self._failures == 0
+
+
+def _never_cancelled():
+ return False
+
+
+def tag_remaining(xs):
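+    # Pair each job spec with the number of specs remaining after it, buffering
+    # about 5000 entries so the counts stay exact with bounded memory; early
+    # items flushed from the buffer are tagged with None instead.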
+ staging = []
+ for x in xs:
+ staging.append(x)
+ if len(staging) > 5000:
+ yield (staging.pop(0), None)
+ n = len(staging)
+ for i, x in enumerate(staging):
+ yield (x, n - i - 1)
+
+
+def run(cmdlines,
+ check_cancelled=_never_cancelled,
+ maxjobs=None,
+ maxjobs_cpu_agnostic=None,
+ newline_on_success=False,
+ travis=False,
+ infinite_runs=False,
+ stop_on_failure=False,
+ add_env={},
+ skip_jobs=False,
+ quiet_success=False,
+ max_time=-1):
+ if skip_jobs:
+ resultset = {}
+ skipped_job_result = JobResult()
+ skipped_job_result.state = 'SKIPPED'
+ for job in cmdlines:
+ message('SKIPPED', job.shortname, do_newline=True)
+ resultset[job.shortname] = [skipped_job_result]
+ return 0, resultset
+ js = Jobset(
+ check_cancelled, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
+ maxjobs_cpu_agnostic if maxjobs_cpu_agnostic is not None else
+ _DEFAULT_MAX_JOBS, newline_on_success, travis, stop_on_failure, add_env,
+ quiet_success, max_time)
+ for cmdline, remaining in tag_remaining(cmdlines):
+ if not js.start(cmdline):
+ break
+ if remaining is not None:
+ js.set_remaining(remaining)
+ js.finish()
+ return js.get_num_failures(), js.resultset
diff --git a/grpc/tools/run_tests/python_utils/port_server.py b/grpc/tools/run_tests/python_utils/port_server.py
new file mode 100755
index 00000000..228ca93d
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/port_server.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python2.7
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Manage TCP ports for unit tests; started by run_tests.py"""
+
+from __future__ import print_function
+
+import argparse
+from six.moves.BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+from six.moves.socketserver import ThreadingMixIn
+import hashlib
+import os
+import socket
+import sys
+import time
+import random
+import threading
+import platform
+
+# increment this number whenever making a change to ensure that
+# the changes are picked up by running CI servers
+# note that all changes must be backwards compatible
+_MY_VERSION = 20
+
+if len(sys.argv) == 2 and sys.argv[1] == 'dump_version':
+ print(_MY_VERSION)
+ sys.exit(0)
+
+argp = argparse.ArgumentParser(description='Server for httpcli_test')
+argp.add_argument('-p', '--port', default=12345, type=int)
+argp.add_argument('-l', '--logfile', default=None, type=str)
+args = argp.parse_args()
+
+if args.logfile is not None:
+ sys.stdin.close()
+ sys.stderr.close()
+ sys.stdout.close()
+ sys.stderr = open(args.logfile, 'w')
+ sys.stdout = sys.stderr
+
+print('port server running on port %d' % args.port)
+
+pool = []
+in_use = {}
+mu = threading.Lock()
+
+# Cronet restricts the following ports to be used (see
+# https://cs.chromium.org/chromium/src/net/base/port_util.cc). When one of these
+# ports is used in a Cronet test, the test would fail (see issue #12149). These
+# ports must be excluded from pool.
+cronet_restricted_ports = [
+ 1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87,
+ 95, 101, 102, 103, 104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139,
+ 143, 179, 389, 465, 512, 513, 514, 515, 526, 530, 531, 532, 540, 556, 563,
+ 587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667, 6668,
+ 6669, 6697
+]
+
+
+def can_connect(port):
+    # this test is only really useful on unices where SO_REUSEPORT is available
+    # so on Windows, where this test is expensive, skip it
+ if platform.system() == 'Windows': return False
+ s = socket.socket()
+ try:
+ s.connect(('localhost', port))
+ return True
+ except socket.error as e:
+ return False
+ finally:
+ s.close()
+
+
+def can_bind(port, proto):
+ s = socket.socket(proto, socket.SOCK_STREAM)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ s.bind(('localhost', port))
+ return True
+ except socket.error as e:
+ return False
+ finally:
+ s.close()
+
+
+def refill_pool(max_timeout, req):
+ """Scan for ports not marked for being in use"""
+ chk = [
+ port for port in range(1025, 32766)
+ if port not in cronet_restricted_ports
+ ]
+ random.shuffle(chk)
+ for i in chk:
+ if len(pool) > 100: break
+ if i in in_use:
+ age = time.time() - in_use[i]
+ if age < max_timeout:
+ continue
+ req.log_message("kill old request %d" % i)
+ del in_use[i]
+ if can_bind(i, socket.AF_INET) and can_bind(
+ i, socket.AF_INET6) and not can_connect(i):
+ req.log_message("found available port %d" % i)
+ pool.append(i)
+
+
+def allocate_port(req):
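+    # Pop the next free port under the lock, refilling the pool as needed; the
+    # reuse timeout is halved on every empty scan so stale reservations are
+    # reclaimed faster the longer allocation starves.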
+ global pool
+ global in_use
+ global mu
+ mu.acquire()
+ max_timeout = 600
+ while not pool:
+ refill_pool(max_timeout, req)
+ if not pool:
+ req.log_message("failed to find ports: retrying soon")
+ mu.release()
+ time.sleep(1)
+ mu.acquire()
+ max_timeout /= 2
+ port = pool[0]
+ pool = pool[1:]
+ in_use[port] = time.time()
+ mu.release()
+ return port
+
+
+keep_running = True
+
+
+class Handler(BaseHTTPRequestHandler):
+
+ def setup(self):
+ # If the client is unreachable for 5 seconds, close the connection
+ self.timeout = 5
+ BaseHTTPRequestHandler.setup(self)
+
+ def do_GET(self):
+ global keep_running
+ global mu
+ if self.path == '/get':
+            # allocate a new port; the reservation is held for up to ten
+            # minutes unless the client drops it earlier
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+ p = allocate_port(self)
+ self.log_message('allocated port %d' % p)
+ self.wfile.write('%d' % p)
+ elif self.path[0:6] == '/drop/':
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+ p = int(self.path[6:])
+ mu.acquire()
+ if p in in_use:
+ del in_use[p]
+ pool.append(p)
+ k = 'known'
+ else:
+ k = 'unknown'
+ mu.release()
+ self.log_message('drop %s port %d' % (k, p))
+ elif self.path == '/version_number':
+ # fetch a version string and the current process pid
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+            self.wfile.write('%d' % _MY_VERSION)
+ elif self.path == '/dump':
+ # yaml module is not installed on Macs and Windows machines by default
+ # so we import it lazily (/dump action is only used for debugging)
+ import yaml
+ self.send_response(200)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+ mu.acquire()
+ now = time.time()
+ out = yaml.dump({
+ 'pool': pool,
+ 'in_use': dict((k, now - v) for k, v in in_use.items())
+ })
+ mu.release()
+ self.wfile.write(out)
+ elif self.path == '/quitquitquit':
+ self.send_response(200)
+ self.end_headers()
+ self.server.shutdown()
+
+
+class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
+ """Handle requests in a separate thread"""
+
+
+ThreadedHTTPServer(('', args.port), Handler).serve_forever()
diff --git a/grpc/tools/run_tests/python_utils/report_utils.py b/grpc/tools/run_tests/python_utils/report_utils.py
new file mode 100644
index 00000000..666edbf6
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/report_utils.py
@@ -0,0 +1,180 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Generate XML and HTML test reports."""
+
+try:
+ from mako.runtime import Context
+ from mako.template import Template
+ from mako import exceptions
+except ImportError:
+    pass  # Mako is not installed, which is OK; HTML reports are skipped.
+import datetime
+import os
+import string
+import xml.etree.cElementTree as ET
+import six
+
+
+def _filter_msg(msg, output_format):
+ """Filters out nonprintable and illegal characters from the message."""
+ if output_format in ['XML', 'HTML']:
+        # keep whitespace but remove formfeed and vertical tab characters
+        # that make the XML report unparsable.
+ filtered_msg = ''.join(
+ filter(lambda x: x in string.printable and x != '\f' and x != '\v',
+ msg.decode('UTF-8', 'ignore')))
+ if output_format == 'HTML':
+ filtered_msg = filtered_msg.replace('"', '&quot;')
+ return filtered_msg
+ else:
+ return msg
+
+
+def new_junit_xml_tree():
+ return ET.ElementTree(ET.Element('testsuites'))
+
+
+def render_junit_xml_report(resultset,
+ report_file,
+ suite_package='grpc',
+ suite_name='tests',
+ replace_dots=True,
+ multi_target=False):
+ """Generate JUnit-like XML report."""
+ if not multi_target:
+ tree = new_junit_xml_tree()
+ append_junit_xml_results(tree, resultset, suite_package, suite_name,
+ '1', replace_dots)
+ create_xml_report_file(tree, report_file)
+ else:
+ # To have each test result displayed as a separate target by the Resultstore/Sponge UI,
+ # we generate a separate XML report file for each test result
+ for shortname, results in six.iteritems(resultset):
+ one_result = {shortname: results}
+ tree = new_junit_xml_tree()
+ append_junit_xml_results(tree, one_result,
+ '%s_%s' % (suite_package, shortname),
+ '%s_%s' % (suite_name, shortname), '1',
+ replace_dots)
+ per_suite_report_file = os.path.join(os.path.dirname(report_file),
+ shortname,
+ os.path.basename(report_file))
+ create_xml_report_file(tree, per_suite_report_file)
+
+
+def create_xml_report_file(tree, report_file):
+ """Generate JUnit-like report file from xml tree ."""
+ # ensure the report directory exists
+ report_dir = os.path.dirname(os.path.abspath(report_file))
+ if not os.path.exists(report_dir):
+ os.makedirs(report_dir)
+ tree.write(report_file, encoding='UTF-8')
+
+
+def append_junit_xml_results(tree,
+ resultset,
+ suite_package,
+ suite_name,
+ id,
+ replace_dots=True):
+ """Append a JUnit-like XML report tree with test results as a new suite."""
+ if replace_dots:
+        # The ResultStore UI displays test suite names containing dots only as
+        # the component after the last dot, which results in bad info being
+        # displayed in the UI. We replace dots with another character to avoid
+        # this problem.
+ suite_name = suite_name.replace('.', '_')
+ testsuite = ET.SubElement(tree.getroot(),
+ 'testsuite',
+ id=id,
+ package=suite_package,
+ name=suite_name,
+ timestamp=datetime.datetime.now().isoformat())
+ failure_count = 0
+ error_count = 0
+ for shortname, results in six.iteritems(resultset):
+ for result in results:
+ xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
+ if result.elapsed_time:
+ xml_test.set('time', str(result.elapsed_time))
+ filtered_msg = _filter_msg(result.message, 'XML')
+ if result.state == 'FAILED':
+ ET.SubElement(xml_test, 'failure',
+ message='Failure').text = filtered_msg
+ failure_count += 1
+ elif result.state == 'TIMEOUT':
+ ET.SubElement(xml_test, 'error',
+ message='Timeout').text = filtered_msg
+ error_count += 1
+ elif result.state == 'SKIPPED':
+ ET.SubElement(xml_test, 'skipped', message='Skipped')
+ testsuite.set('failures', str(failure_count))
+ testsuite.set('errors', str(error_count))
+
+
+def render_interop_html_report(client_langs, server_langs, test_cases,
+ auth_test_cases, http2_cases, http2_server_cases,
+ resultset, num_failures, cloud_to_prod,
+ prod_servers, http2_interop):
+ """Generate HTML report for interop tests."""
+ template_file = 'tools/run_tests/interop/interop_html_report.template'
+ try:
+ mytemplate = Template(filename=template_file, format_exceptions=True)
+ except NameError:
+ print(
+ 'Mako template is not installed. Skipping HTML report generation.')
+ return
+ except IOError as e:
+ print('Failed to find the template %s: %s' % (template_file, e))
+ return
+
+ sorted_test_cases = sorted(test_cases)
+ sorted_auth_test_cases = sorted(auth_test_cases)
+ sorted_http2_cases = sorted(http2_cases)
+ sorted_http2_server_cases = sorted(http2_server_cases)
+ sorted_client_langs = sorted(client_langs)
+ sorted_server_langs = sorted(server_langs)
+ sorted_prod_servers = sorted(prod_servers)
+
+ args = {
+ 'client_langs': sorted_client_langs,
+ 'server_langs': sorted_server_langs,
+ 'test_cases': sorted_test_cases,
+ 'auth_test_cases': sorted_auth_test_cases,
+ 'http2_cases': sorted_http2_cases,
+ 'http2_server_cases': sorted_http2_server_cases,
+ 'resultset': resultset,
+ 'num_failures': num_failures,
+ 'cloud_to_prod': cloud_to_prod,
+ 'prod_servers': sorted_prod_servers,
+ 'http2_interop': http2_interop
+ }
+
+ html_report_out_dir = 'reports'
+ if not os.path.exists(html_report_out_dir):
+ os.mkdir(html_report_out_dir)
+ html_file_path = os.path.join(html_report_out_dir, 'index.html')
+ try:
+ with open(html_file_path, 'w') as output_file:
+ mytemplate.render_context(Context(output_file, **args))
+ except:
+ print(exceptions.text_error_template().render())
+ raise
+
+
+def render_perf_profiling_results(output_filepath, profile_names):
+ with open(output_filepath, 'w') as output_file:
+ output_file.write('<ul>\n')
+ for name in profile_names:
+ output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name))
+ output_file.write('</ul>\n')
diff --git a/grpc/tools/run_tests/python_utils/start_port_server.py b/grpc/tools/run_tests/python_utils/start_port_server.py
new file mode 100644
index 00000000..f390d1d6
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/start_port_server.py
@@ -0,0 +1,129 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+from . import jobset
+
+import six.moves.urllib.request as request
+import logging
+import os
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+
+# must be synchronized with test/core/utils/port_server_client.h
+_PORT_SERVER_PORT = 32766
+
+
+def start_port_server():
+ # check if a compatible port server is running
+ # if incompatible (version mismatch) ==> start a new one
+ # if not running ==> start a new one
+ # otherwise, leave it up
+ try:
+ version = int(
+ request.urlopen('http://localhost:%d/version_number' %
+ _PORT_SERVER_PORT).read())
+ logging.info('detected port server running version %d', version)
+ running = True
+ except Exception as e:
+ logging.exception('failed to detect port server')
+ running = False
+ if running:
+ current_version = int(
+ subprocess.check_output([
+ sys.executable,
+ os.path.abspath('tools/run_tests/python_utils/port_server.py'),
+ 'dump_version'
+ ]))
+ logging.info('my port server is version %d', current_version)
+ running = (version >= current_version)
+ if not running:
+ logging.info('port_server version mismatch: killing the old one')
+ request.urlopen('http://localhost:%d/quitquitquit' %
+ _PORT_SERVER_PORT).read()
+ time.sleep(1)
+ if not running:
+ fd, logfile = tempfile.mkstemp()
+ os.close(fd)
+ logging.info('starting port_server, with log file %s', logfile)
+ args = [
+ sys.executable,
+ os.path.abspath('tools/run_tests/python_utils/port_server.py'),
+ '-p',
+ '%d' % _PORT_SERVER_PORT, '-l', logfile
+ ]
+ env = dict(os.environ)
+ env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
+ if jobset.platform_string() == 'windows':
+ # Working directory of port server needs to be outside of Jenkins
+ # workspace to prevent file lock issues.
+ tempdir = tempfile.mkdtemp()
+ port_server = subprocess.Popen(
+ args,
+ env=env,
+ cwd=tempdir,
+ creationflags=0x00000008, # detached process
+ close_fds=True)
+ else:
+ port_server = subprocess.Popen(args,
+ env=env,
+ preexec_fn=os.setsid,
+ close_fds=True)
+ time.sleep(1)
+ # ensure port server is up
+ waits = 0
+ while True:
+ if waits > 10:
+ logging.warning(
+ 'killing port server due to excessive start up waits')
+ port_server.kill()
+ if port_server.poll() is not None:
+ logging.error('port_server failed to start')
+ # try one final time: maybe another build managed to start one
+ time.sleep(1)
+ try:
+ request.urlopen('http://localhost:%d/get' %
+ _PORT_SERVER_PORT).read()
+ logging.info(
+ 'last ditch attempt to contact port server succeeded')
+ break
+ except:
+ logging.exception(
+ 'final attempt to contact port server failed')
+ port_log = open(logfile, 'r').read()
+ print(port_log)
+ sys.exit(1)
+ try:
+ port_server_url = 'http://localhost:%d/get' % _PORT_SERVER_PORT
+ request.urlopen(port_server_url).read()
+ logging.info('port server is up and ready')
+ break
+ except socket.timeout:
+ logging.exception('while waiting for port_server')
+ time.sleep(1)
+ waits += 1
+ except IOError:
+ logging.exception('while waiting for port_server')
+ time.sleep(1)
+ waits += 1
+ except:
+ logging.exception(
+ 'error while contacting port server at "%s".'
+ 'Will try killing it.', port_server_url)
+ port_server.kill()
+ raise
diff --git a/grpc/tools/run_tests/python_utils/upload_rbe_results.py b/grpc/tools/run_tests/python_utils/upload_rbe_results.py
new file mode 100755
index 00000000..8aedd72a
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/upload_rbe_results.py
@@ -0,0 +1,293 @@
+#!/usr/bin/env python
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Uploads RBE results to BigQuery"""
+
+import argparse
+import os
+import json
+import sys
+import urllib2
+import uuid
+
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+import big_query_utils
+
+_DATASET_ID = 'jenkins_test_results'
+_DESCRIPTION = 'Test results from master RBE builds on Kokoro'
+# 90 days in milliseconds
+_EXPIRATION_MS = 90 * 24 * 60 * 60 * 1000
+_PARTITION_TYPE = 'DAY'
+_PROJECT_ID = 'grpc-testing'
+_RESULTS_SCHEMA = [
+ ('job_name', 'STRING', 'Name of Kokoro job'),
+ ('build_id', 'INTEGER', 'Build ID of Kokoro job'),
+ ('build_url', 'STRING', 'URL of Kokoro build'),
+ ('test_target', 'STRING', 'Bazel target path'),
+ ('test_class_name', 'STRING', 'Name of test class'),
+ ('test_case', 'STRING', 'Name of test case'),
+ ('result', 'STRING', 'Test or build result'),
+ ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+ ('duration', 'FLOAT', 'Duration of the test run'),
+]
+_TABLE_ID = 'rbe_test_results'
+
+
+def _get_api_key():
+ """Returns string with API key to access ResultStore.
+ Intended to be used in Kokoro environment."""
+ api_key_directory = os.getenv('KOKORO_GFILE_DIR')
+ api_key_file = os.path.join(api_key_directory, 'resultstore_api_key')
+ assert os.path.isfile(api_key_file), 'Must add --api_key arg if not on ' \
+ 'Kokoro or Kokoro environment is not set up properly.'
+ with open(api_key_file, 'r') as f:
+ return f.read().replace('\n', '')
+
+
+def _get_invocation_id():
+ """Returns String of Bazel invocation ID. Intended to be used in
+ Kokoro environment."""
+ bazel_id_directory = os.getenv('KOKORO_ARTIFACTS_DIR')
+ bazel_id_file = os.path.join(bazel_id_directory, 'bazel_invocation_ids')
+ assert os.path.isfile(bazel_id_file), 'bazel_invocation_ids file, written ' \
+ 'by RBE initialization script, expected but not found.'
+ with open(bazel_id_file, 'r') as f:
+ return f.read().replace('\n', '')
+
+
+def _parse_test_duration(duration_str):
+ """Parse test duration string in '123.567s' format"""
+ try:
+ if duration_str.endswith('s'):
+ duration_str = duration_str[:-1]
+ return float(duration_str)
+ except:
+ return None
+
+
+def _upload_results_to_bq(rows):
+ """Upload test results to a BQ table.
+
+ Args:
+ rows: A list of dictionaries containing data for each row to insert
+ """
+ bq = big_query_utils.create_big_query()
+ big_query_utils.create_partitioned_table(bq,
+ _PROJECT_ID,
+ _DATASET_ID,
+ _TABLE_ID,
+ _RESULTS_SCHEMA,
+ _DESCRIPTION,
+ partition_type=_PARTITION_TYPE,
+ expiration_ms=_EXPIRATION_MS)
+
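+    # insert_rows reports success via its return value, so retry a few times on
+    # a falsy result before aborting the upload.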
+ max_retries = 3
+ for attempt in range(max_retries):
+ if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, _TABLE_ID,
+ rows):
+ break
+ else:
+ if attempt < max_retries - 1:
+ print('Error uploading result to bigquery, will retry.')
+ else:
+ print(
+ 'Error uploading result to bigquery, all attempts failed.')
+ sys.exit(1)
+
+
+def _get_resultstore_data(api_key, invocation_id):
+ """Returns dictionary of test results by querying ResultStore API.
+ Args:
+ api_key: String of ResultStore API key
+      invocation_id: String of ResultStore invocation ID to fetch results from
+ """
+ all_actions = []
+ page_token = ''
+    # ResultStore's API returns data on a limited number of tests. When we exceed
+    # that limit, the 'nextPageToken' field is included in the response; pass it
+    # as the pageToken of the next request, and keep requesting until the
+    # 'nextPageToken' field is omitted.
+ while True:
+ req = urllib2.Request(
+ url=
+ 'https://resultstore.googleapis.com/v2/invocations/%s/targets/-/configuredTargets/-/actions?key=%s&pageToken=%s&fields=next_page_token,actions.id,actions.status_attributes,actions.timing,actions.test_action'
+ % (invocation_id, api_key, page_token),
+ headers={'Content-Type': 'application/json'})
+ results = json.loads(urllib2.urlopen(req).read())
+ all_actions.extend(results['actions'])
+ if 'nextPageToken' not in results:
+ break
+ page_token = results['nextPageToken']
+ return all_actions
+
+
+if __name__ == "__main__":
+ # Arguments are necessary if running in a non-Kokoro environment.
+ argp = argparse.ArgumentParser(
+ description=
+ 'Fetches results for given RBE invocation and uploads them to BigQuery table.'
+ )
+ argp.add_argument('--api_key',
+ default='',
+ type=str,
+ help='The API key to read from ResultStore API')
+ argp.add_argument('--invocation_id',
+ default='',
+ type=str,
+ help='UUID of bazel invocation to fetch.')
+ argp.add_argument('--bq_dump_file',
+ default=None,
+ type=str,
+ help='Dump JSON data to file just before uploading')
+ argp.add_argument('--resultstore_dump_file',
+ default=None,
+ type=str,
+ help='Dump JSON data as received from ResultStore API')
+ argp.add_argument('--skip_upload',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Skip uploading to bigquery')
+ args = argp.parse_args()
+
+ api_key = args.api_key or _get_api_key()
+ invocation_id = args.invocation_id or _get_invocation_id()
+ resultstore_actions = _get_resultstore_data(api_key, invocation_id)
+
+ if args.resultstore_dump_file:
+ with open(args.resultstore_dump_file, 'w') as f:
+ json.dump(resultstore_actions, f, indent=4, sort_keys=True)
+ print('Dumped resultstore data to file %s' % args.resultstore_dump_file)
+
+ # google.devtools.resultstore.v2.Action schema:
+ # https://github.com/googleapis/googleapis/blob/master/google/devtools/resultstore/v2/action.proto
+ bq_rows = []
+ for index, action in enumerate(resultstore_actions):
+ # Filter out non-test related data, such as build results.
+ if 'testAction' not in action:
+ continue
+        # Some test results contain the fileProcessingErrors field, which indicates
+        # an issue with parsing the results of individual test cases.
+ if 'fileProcessingErrors' in action:
+ test_cases = [{
+ 'testCase': {
+ 'caseName': str(action['id']['actionId']),
+ }
+ }]
+ # Test timeouts have a different dictionary structure compared to pass and
+ # fail results.
+ elif action['statusAttributes']['status'] == 'TIMED_OUT':
+ test_cases = [{
+ 'testCase': {
+ 'caseName': str(action['id']['actionId']),
+ 'timedOut': True
+ }
+ }]
+ # When RBE believes its infrastructure is failing, it will abort and
+ # mark running tests as UNKNOWN. These infrastructure failures may be
+ # related to our tests, so we should investigate if specific tests are
+ # repeatedly being marked as UNKNOWN.
+ elif action['statusAttributes']['status'] == 'UNKNOWN':
+ test_cases = [{
+ 'testCase': {
+ 'caseName': str(action['id']['actionId']),
+ 'unknown': True
+ }
+ }]
+ # Take the timestamp from the previous action, which should be
+ # a close approximation.
+ action['timing'] = {
+ 'startTime':
+ resultstore_actions[index - 1]['timing']['startTime']
+ }
+ elif 'testSuite' not in action['testAction']:
+ continue
+ elif 'tests' not in action['testAction']['testSuite']:
+ continue
+ else:
+ test_cases = []
+ for tests_item in action['testAction']['testSuite']['tests']:
+ test_cases += tests_item['testSuite']['tests']
+ for test_case in test_cases:
+ if any(s in test_case['testCase'] for s in ['errors', 'failures']):
+ result = 'FAILED'
+ elif 'timedOut' in test_case['testCase']:
+ result = 'TIMEOUT'
+ elif 'unknown' in test_case['testCase']:
+ result = 'UNKNOWN'
+ else:
+ result = 'PASSED'
+ try:
+ bq_rows.append({
+ 'insertId': str(uuid.uuid4()),
+ 'json': {
+ 'job_name':
+ os.getenv('KOKORO_JOB_NAME'),
+ 'build_id':
+ os.getenv('KOKORO_BUILD_NUMBER'),
+ 'build_url':
+ 'https://source.cloud.google.com/results/invocations/%s'
+ % invocation_id,
+ 'test_target':
+ action['id']['targetId'],
+ 'test_class_name':
+ test_case['testCase'].get('className', ''),
+ 'test_case':
+ test_case['testCase']['caseName'],
+ 'result':
+ result,
+ 'timestamp':
+ action['timing']['startTime'],
+ 'duration':
+ _parse_test_duration(action['timing']['duration']),
+ }
+ })
+ except Exception as e:
+ print('Failed to parse test result. Error: %s' % str(e))
+ print(json.dumps(test_case, indent=4))
+ bq_rows.append({
+ 'insertId': str(uuid.uuid4()),
+ 'json': {
+ 'job_name':
+ os.getenv('KOKORO_JOB_NAME'),
+ 'build_id':
+ os.getenv('KOKORO_BUILD_NUMBER'),
+ 'build_url':
+ 'https://source.cloud.google.com/results/invocations/%s'
+ % invocation_id,
+ 'test_target':
+ action['id']['targetId'],
+ 'test_class_name':
+ 'N/A',
+ 'test_case':
+ 'N/A',
+ 'result':
+ 'UNPARSEABLE',
+ 'timestamp':
+ 'N/A',
+ }
+ })
+
+ if args.bq_dump_file:
+ with open(args.bq_dump_file, 'w') as f:
+ json.dump(bq_rows, f, indent=4, sort_keys=True)
+ print('Dumped BQ data to file %s' % args.bq_dump_file)
+
+ if not args.skip_upload:
+        # BigQuery sometimes fails with large uploads, so batch 1,000 rows at a
+        # time (floor division keeps the batch count an integer on Python 3).
+        for i in range((len(bq_rows) // 1000) + 1):
+ _upload_results_to_bq(bq_rows[i * 1000:(i + 1) * 1000])
+ else:
+ print('Skipped upload to bigquery.')
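+# Example manual invocation outside of Kokoro (all values are placeholders):
+#   python <this_script>.py --api_key=<key> --invocation_id=<uuid> \
+#       --resultstore_dump_file=/tmp/resultstore.json --skip_upload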
diff --git a/grpc/tools/run_tests/python_utils/upload_test_results.py b/grpc/tools/run_tests/python_utils/upload_test_results.py
new file mode 100644
index 00000000..c000d8e7
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/upload_test_results.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Helper to upload Jenkins test results to BQ"""
+
+from __future__ import print_function
+
+import os
+import six
+import sys
+import time
+import uuid
+
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+import big_query_utils
+
+_DATASET_ID = 'jenkins_test_results'
+_DESCRIPTION = 'Test results from master job run on Jenkins'
+# 90 days in milliseconds
+_EXPIRATION_MS = 90 * 24 * 60 * 60 * 1000
+_PARTITION_TYPE = 'DAY'
+_PROJECT_ID = 'grpc-testing'
+_RESULTS_SCHEMA = [
+ ('job_name', 'STRING', 'Name of Jenkins job'),
+ ('build_id', 'INTEGER', 'Build ID of Jenkins job'),
+ ('build_url', 'STRING', 'URL of Jenkins job'),
+ ('test_name', 'STRING', 'Individual test name'),
+ ('language', 'STRING', 'Language of test'),
+ ('platform', 'STRING', 'Platform used for test'),
+ ('config', 'STRING', 'Config used for test'),
+ ('compiler', 'STRING', 'Compiler used for test'),
+ ('iomgr_platform', 'STRING', 'Iomgr used for test'),
+ ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+ ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+ ('elapsed_time', 'FLOAT', 'How long test took to run'),
+ ('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
+ ('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
+ ('return_code', 'INTEGER', 'Exit code of test'),
+]
+_INTEROP_RESULTS_SCHEMA = [
+ ('job_name', 'STRING', 'Name of Jenkins/Kokoro job'),
+ ('build_id', 'INTEGER', 'Build ID of Jenkins/Kokoro job'),
+ ('build_url', 'STRING', 'URL of Jenkins/Kokoro job'),
+ ('test_name', 'STRING',
+ 'Unique test name combining client, server, and test_name'),
+ ('suite', 'STRING',
+ 'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
+ ('client', 'STRING', 'Client language'),
+ ('server', 'STRING', 'Server host name'),
+ ('test_case', 'STRING', 'Name of test case'),
+ ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+ ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+ ('elapsed_time', 'FLOAT', 'How long test took to run'),
+]
+
+
+def _get_build_metadata(test_results):
+ """Add Kokoro build metadata to test_results based on environment
+ variables set by Kokoro.
+ """
+ build_id = os.getenv('KOKORO_BUILD_NUMBER')
+ build_url = 'https://source.cloud.google.com/results/invocations/%s' % os.getenv(
+ 'KOKORO_BUILD_ID')
+ job_name = os.getenv('KOKORO_JOB_NAME')
+
+ if build_id:
+ test_results['build_id'] = build_id
+ if build_url:
+ test_results['build_url'] = build_url
+ if job_name:
+ test_results['job_name'] = job_name
+
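+# For example, with hypothetical Kokoro settings KOKORO_BUILD_NUMBER=123,
+# KOKORO_BUILD_ID=abc-def and KOKORO_JOB_NAME=grpc/master, this populates:
+#   {'build_id': '123',
+#    'build_url': 'https://source.cloud.google.com/results/invocations/abc-def',
+#    'job_name': 'grpc/master'}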
+
+def _insert_rows_with_retries(bq, bq_table, bq_rows):
+    """Insert rows into a BQ table, retrying on error."""
+    # BigQuery sometimes fails with large uploads, so batch 1,000 rows at a
+    # time (floor division keeps the batch count an integer on Python 3).
+    for i in range((len(bq_rows) // 1000) + 1):
+ max_retries = 3
+ for attempt in range(max_retries):
+ if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+ bq_table,
+ bq_rows[i * 1000:(i + 1) * 1000]):
+ break
+ else:
+ if attempt < max_retries - 1:
+ print('Error uploading result to bigquery, will retry.')
+ else:
+ print(
+ 'Error uploading result to bigquery, all attempts failed.'
+ )
+ sys.exit(1)
+
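+# Batching example (hypothetical row count): with 2,500 rows, the loop above
+# iterates i = 0, 1, 2 and uploads slices rows[0:1000], rows[1000:2000] and
+# rows[2000:3000]; the final slice holds the remaining 500 rows.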
+
+def upload_results_to_bq(resultset, bq_table, extra_fields):
+ """Upload test results to a BQ table.
+
+ Args:
+ resultset: dictionary generated by jobset.run
+ bq_table: string name of table to create/upload results to in BQ
+ extra_fields: dict with extra values that will be uploaded along with the results
+ """
+ bq = big_query_utils.create_big_query()
+ big_query_utils.create_partitioned_table(bq,
+ _PROJECT_ID,
+ _DATASET_ID,
+ bq_table,
+ _RESULTS_SCHEMA,
+ _DESCRIPTION,
+ partition_type=_PARTITION_TYPE,
+ expiration_ms=_EXPIRATION_MS)
+
+ bq_rows = []
+ for shortname, results in six.iteritems(resultset):
+ for result in results:
+ test_results = {}
+ _get_build_metadata(test_results)
+ test_results['cpu_estimated'] = result.cpu_estimated
+ test_results['cpu_measured'] = result.cpu_measured
+ test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+ test_results['result'] = result.state
+ test_results['return_code'] = result.returncode
+ test_results['test_name'] = shortname
+ test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+ for field_name, field_value in six.iteritems(extra_fields):
+ test_results[field_name] = field_value
+ row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+ bq_rows.append(row)
+ _insert_rows_with_retries(bq, bq_table, bq_rows)
+
+
+def upload_interop_results_to_bq(resultset, bq_table):
+ """Upload interop test results to a BQ table.
+
+ Args:
+ resultset: dictionary generated by jobset.run
+ bq_table: string name of table to create/upload results to in BQ
+ """
+ bq = big_query_utils.create_big_query()
+ big_query_utils.create_partitioned_table(bq,
+ _PROJECT_ID,
+ _DATASET_ID,
+ bq_table,
+ _INTEROP_RESULTS_SCHEMA,
+ _DESCRIPTION,
+ partition_type=_PARTITION_TYPE,
+ expiration_ms=_EXPIRATION_MS)
+
+ bq_rows = []
+ for shortname, results in six.iteritems(resultset):
+ for result in results:
+ test_results = {}
+ _get_build_metadata(test_results)
+ test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+ test_results['result'] = result.state
+ test_results['test_name'] = shortname
+ test_results['suite'] = shortname.split(':')[0]
+ test_results['client'] = shortname.split(':')[1]
+ test_results['server'] = shortname.split(':')[2]
+ test_results['test_case'] = shortname.split(':')[3]
+ test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+ row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+ bq_rows.append(row)
+ _insert_rows_with_retries(bq, bq_table, bq_rows)
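+# The interop shortname is assumed to follow the 'suite:client:server:test_case'
+# convention used by the split(':') calls above, e.g. (hypothetical):
+#   'cloud_to_cloud:go:java_server:ping_pong'
+# -> suite='cloud_to_cloud', client='go', server='java_server',
+#    test_case='ping_pong'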
diff --git a/grpc/tools/run_tests/python_utils/watch_dirs.py b/grpc/tools/run_tests/python_utils/watch_dirs.py
new file mode 100755
index 00000000..f2f1c006
--- /dev/null
+++ b/grpc/tools/run_tests/python_utils/watch_dirs.py
@@ -0,0 +1,60 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Helper to watch a set of directories for modifications."""
+
+import errno
+import os
+import time
+from six import string_types
+
+
+class DirWatcher(object):
+    """Helper to watch a set of directories for modifications."""
+
+ def __init__(self, paths):
+ if isinstance(paths, string_types):
+ paths = [paths]
+ self._done = False
+ self.paths = list(paths)
+ self.lastrun = time.time()
+ self._cache = self._calculate()
+
+ def _calculate(self):
+ """Walk over all subscribed paths, check most recent mtime."""
+ most_recent_change = None
+ for path in self.paths:
+ if not os.path.exists(path):
+ continue
+ if not os.path.isdir(path):
+ continue
+ for root, _, files in os.walk(path):
+ for f in files:
+ if f and f[0] == '.': continue
+ try:
+ st = os.stat(os.path.join(root, f))
+ except OSError as e:
+                        if e.errno == errno.ENOENT:
+ continue
+ raise
+ if most_recent_change is None:
+ most_recent_change = st.st_mtime
+ else:
+ most_recent_change = max(most_recent_change,
+ st.st_mtime)
+ return most_recent_change
+
+ def most_recent_change(self):
+ if time.time() - self.lastrun > 1:
+ self._cache = self._calculate()
+ self.lastrun = time.time()
+ return self._cache
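+# Minimal usage sketch (hypothetical paths and rebuild hook):
+#   watcher = DirWatcher(['src/core', 'src/python'])
+#   if watcher.most_recent_change() > last_build_time:
+#       rebuild()  # most_recent_change() re-walks the trees at most once/second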
diff --git a/grpc/tools/run_tests/run_build_statistics.py b/grpc/tools/run_tests/run_build_statistics.py
new file mode 100755
index 00000000..d88f3db2
--- /dev/null
+++ b/grpc/tools/run_tests/run_build_statistics.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tool to get build statistics from Jenkins and upload to BigQuery."""
+
+from __future__ import print_function
+
+import argparse
+import jenkinsapi
+from jenkinsapi.custom_exceptions import JenkinsAPIException
+from jenkinsapi.jenkins import Jenkins
+import json
+import os
+import re
+import sys
+import urllib
+
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+import big_query_utils
+
+_PROJECT_ID = 'grpc-testing'
+_HAS_MATRIX = True
+_BUILDS = {
+ 'gRPC_interop_master': not _HAS_MATRIX,
+ 'gRPC_master_linux': not _HAS_MATRIX,
+ 'gRPC_master_macos': not _HAS_MATRIX,
+ 'gRPC_master_windows': not _HAS_MATRIX,
+ 'gRPC_performance_master': not _HAS_MATRIX,
+ 'gRPC_portability_master_linux': not _HAS_MATRIX,
+ 'gRPC_portability_master_windows': not _HAS_MATRIX,
+ 'gRPC_master_asanitizer_c': not _HAS_MATRIX,
+ 'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
+ 'gRPC_master_msan_c': not _HAS_MATRIX,
+ 'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
+ 'gRPC_master_tsan_cpp': not _HAS_MATRIX,
+ 'gRPC_interop_pull_requests': not _HAS_MATRIX,
+ 'gRPC_performance_pull_requests': not _HAS_MATRIX,
+ 'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
+ 'gRPC_portability_pr_win': not _HAS_MATRIX,
+ 'gRPC_pull_requests_linux': not _HAS_MATRIX,
+ 'gRPC_pull_requests_macos': not _HAS_MATRIX,
+ 'gRPC_pr_win': not _HAS_MATRIX,
+ 'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
+ 'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
+ 'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
+}
+_URL_BASE = 'https://grpc-testing.appspot.com/job'
+
+# This is a dynamic list where known and active issues should be added.
+# Fixed ones should be removed.
+# Also try not to add multiple messages from the same failure.
+_KNOWN_ERRORS = [
+ 'Failed to build workspace Tests with scheme AllTests',
+ 'Build timed out',
+ 'TIMEOUT: tools/run_tests/pre_build_node.sh',
+ 'TIMEOUT: tools/run_tests/pre_build_ruby.sh',
+ 'FATAL: Unable to produce a script file',
+    r'FAILED: build_docker_c\+\+',
+    r'cannot find package \"cloud.google.com/go/compute/metadata\"',
+ 'LLVM ERROR: IO failure on output stream.',
+ 'MSBUILD : error MSB1009: Project file does not exist.',
+ 'fatal: git fetch_pack: expected ACK/NAK',
+ 'Failed to fetch from http://github.com/grpc/grpc.git',
+ ('hudson.remoting.RemotingSystemException: java.io.IOException: '
+ 'Backing channel is disconnected.'),
+ 'hudson.remoting.ChannelClosedException',
+ 'Could not initialize class hudson.Util',
+ 'Too many open files in system',
+ 'FAILED: bins/tsan/qps_openloop_test GRPC_POLL_STRATEGY=epoll',
+ 'FAILED: bins/tsan/qps_openloop_test GRPC_POLL_STRATEGY=legacy',
+ 'FAILED: bins/tsan/qps_openloop_test GRPC_POLL_STRATEGY=poll',
+ ('tests.bins/asan/h2_proxy_test streaming_error_response '
+ 'GRPC_POLL_STRATEGY=legacy'),
+ 'hudson.plugins.git.GitException',
+ 'Couldn\'t find any revision to build',
+ 'org.jenkinsci.plugin.Diskcheck.preCheckout',
+ 'Something went wrong while deleting Files',
+]
+_NO_REPORT_FILES_FOUND_ERROR = 'No test report files were found.'
+_UNKNOWN_ERROR = 'Unknown error'
+_DATASET_ID = 'build_statistics'
+
+
+def _scrape_for_known_errors(html):
+ error_list = []
+ for known_error in _KNOWN_ERRORS:
+ errors = re.findall(known_error, html)
+ this_error_count = len(errors)
+ if this_error_count > 0:
+ error_list.append({
+ 'description': known_error,
+ 'count': this_error_count
+ })
+ print('====> %d failures due to %s' %
+ (this_error_count, known_error))
+ return error_list
+
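+# For example (hypothetical log): if 'Build timed out' occurs twice in html,
+# the returned error_list is [{'description': 'Build timed out', 'count': 2}].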
+
+def _no_report_files_found(html):
+ return _NO_REPORT_FILES_FOUND_ERROR in html
+
+
+def _get_last_processed_buildnumber(build_name):
+ query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
+ _PROJECT_ID, _DATASET_ID, build_name)
+ query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
+ page = bq.jobs().getQueryResults(
+ pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+ if page['rows'][0]['f'][0]['v']:
+ return int(page['rows'][0]['f'][0]['v'])
+ return 0
+
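+# The legacy-SQL '[project:dataset.table]' query above returns one row; e.g. a
+# stored max(build_number) of 1500 makes processing resume at build 1501 (see
+# starting_build_number below).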
+
+def _process_matrix(build, url_base):
+ matrix_list = []
+ for matrix in build.get_matrix_runs():
+ matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
+ matrix.name).groups()[0]
+ matrix_tuple = matrix_str.split(',')
+ json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
+ url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+ console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
+ url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+ matrix_dict = {
+ 'name': matrix_str,
+ 'duration': matrix.get_duration().total_seconds()
+ }
+ matrix_dict.update(_process_build(json_url, console_url))
+ matrix_list.append(matrix_dict)
+
+ return matrix_list
+
+
+def _process_build(json_url, console_url):
+ build_result = {}
+ error_list = []
+ try:
+ html = urllib.urlopen(json_url).read()
+ test_result = json.loads(html)
+ print('====> Parsing result from %s' % json_url)
+ failure_count = test_result['failCount']
+ build_result['pass_count'] = test_result['passCount']
+ build_result['failure_count'] = failure_count
+ # This means Jenkins failure occurred.
+ build_result['no_report_files_found'] = _no_report_files_found(html)
+ # Only check errors if Jenkins failure occurred.
+ if build_result['no_report_files_found']:
+ error_list = _scrape_for_known_errors(html)
+ except Exception as e:
+ print('====> Got exception for %s: %s.' % (json_url, str(e)))
+ print('====> Parsing errors from %s.' % console_url)
+ html = urllib.urlopen(console_url).read()
+ build_result['pass_count'] = 0
+ build_result['failure_count'] = 1
+        # In this case the string doesn't appear in the result html, but the
+        # fact that we failed to parse the result html itself indicates a
+        # Jenkins failure, and hence no report files were generated.
+ build_result['no_report_files_found'] = True
+ error_list = _scrape_for_known_errors(html)
+
+ if error_list:
+ build_result['error'] = error_list
+ elif build_result['no_report_files_found']:
+ build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
+ else:
+ build_result['error'] = [{'description': '', 'count': 0}]
+
+ return build_result
+
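+# A successful parse yields a dict shaped like (hypothetical counts):
+#   {'pass_count': 120, 'failure_count': 2, 'no_report_files_found': False,
+#    'error': [{'description': '', 'count': 0}]}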
+
+# parse command line
+argp = argparse.ArgumentParser(description='Get build statistics.')
+argp.add_argument('-u', '--username', default='jenkins')
+argp.add_argument('-b',
+ '--builds',
+ choices=['all'] + sorted(_BUILDS.keys()),
+ nargs='+',
+ default=['all'])
+args = argp.parse_args()
+
+J = Jenkins('https://grpc-testing.appspot.com', args.username, 'apiToken')
+bq = big_query_utils.create_big_query()
+
+for build_name in _BUILDS.keys() if 'all' in args.builds else args.builds:
+ print('====> Build: %s' % build_name)
+    # Since get_last_completed_build() always fails with a malformed-string
+    # error, we use get_build_metadata() instead.
+ job = None
+ try:
+ job = J[build_name]
+ except Exception as e:
+ print('====> Failed to get build %s: %s.' % (build_name, str(e)))
+ continue
+ last_processed_build_number = _get_last_processed_buildnumber(build_name)
+ last_complete_build_number = job.get_last_completed_buildnumber()
+    # Avoid processing every build of a project we have never looked at; in
+    # that case, only examine the 10 latest builds.
+ starting_build_number = max(last_processed_build_number + 1,
+ last_complete_build_number - 9)
+ for build_number in xrange(starting_build_number,
+ last_complete_build_number + 1):
+ print('====> Processing %s build %d.' % (build_name, build_number))
+ build = None
+ try:
+ build = job.get_build_metadata(build_number)
+ print('====> Build status: %s.' % build.get_status())
+ if build.get_status() == 'ABORTED':
+ continue
+            # If any build is still running, stop processing this job. Next
+            # time, we will start from where we left off, so that all builds
+            # are processed sequentially.
+ if build.is_running():
+ print('====> Build %d is still running.' % build_number)
+ break
+ except KeyError:
+ print('====> Build %s is missing. Skip.' % build_number)
+ continue
+ build_result = {
+ 'build_number': build_number,
+ 'timestamp': str(build.get_timestamp())
+ }
+ url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
+ if _BUILDS[build_name]: # The build has matrix, such as gRPC_master.
+ build_result['matrix'] = _process_matrix(build, url_base)
+ else:
+ json_url = '%s/testReport/api/json' % url_base
+ console_url = '%s/consoleFull' % url_base
+ build_result['duration'] = build.get_duration().total_seconds()
+ build_stat = _process_build(json_url, console_url)
+ build_result.update(build_stat)
+ rows = [big_query_utils.make_row(build_number, build_result)]
+ if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+ build_name, rows):
+ print('====> Error uploading result to bigquery.')
+ sys.exit(1)
diff --git a/grpc/tools/run_tests/run_grpclb_interop_tests.py b/grpc/tools/run_tests/run_grpclb_interop_tests.py
new file mode 100755
index 00000000..dc2a70be
--- /dev/null
+++ b/grpc/tools/run_tests/run_grpclb_interop_tests.py
@@ -0,0 +1,605 @@
+#!/usr/bin/env python
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Run interop (cross-language) tests in parallel."""
+
+from __future__ import print_function
+
+import argparse
+import atexit
+import itertools
+import json
+import multiprocessing
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import time
+import uuid
+import six
+import traceback
+
+import python_utils.dockerjob as dockerjob
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+
+# Docker (run with -i) doesn't restore the terminal state after itself, so
+# re-enable 'stty echo' on exit.
+atexit.register(lambda: subprocess.call(['stty', 'echo']))
+
+ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+os.chdir(ROOT)
+
+_FALLBACK_SERVER_PORT = 443
+_BALANCER_SERVER_PORT = 12000
+_BACKEND_SERVER_PORT = 8080
+
+_TEST_TIMEOUT = 30
+
+_FAKE_SERVERS_SAFENAME = 'fake_servers'
+
+# Use a name that's verified by the test certs
+_SERVICE_NAME = 'server.test.google.fr'
+
+
+class CXXLanguage:
+
+ def __init__(self):
+ self.client_cwd = '/var/local/git/grpc'
+ self.safename = 'cxx'
+
+ def client_cmd(self, args):
+ return ['bins/opt/interop_client'] + args
+
+ def global_env(self):
+ # 1) Set c-ares as the resolver, to
+ # enable grpclb.
+ # 2) Turn on verbose logging.
+ # 3) Set the ROOTS_PATH env variable
+ # to the test CA in order for
+ # GoogleDefaultCredentials to be
+ # able to use the test CA.
+ return {
+ 'GRPC_DNS_RESOLVER':
+ 'ares',
+ 'GRPC_VERBOSITY':
+ 'DEBUG',
+ 'GRPC_TRACE':
+ 'client_channel,glb',
+ 'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
+ '/var/local/git/grpc/src/core/tsi/test_creds/ca.pem',
+ }
+
+ def __str__(self):
+ return 'c++'
+
+
+class JavaLanguage:
+
+ def __init__(self):
+ self.client_cwd = '/var/local/git/grpc-java'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ # Take necessary steps to import our test CA into
+ # the set of test CA's that the Java runtime of the
+ # docker container will pick up, so that
+ # Java GoogleDefaultCreds can use it.
+ pem_to_der_cmd = ('openssl x509 -outform der '
+ '-in /external_mount/src/core/tsi/test_creds/ca.pem '
+ '-out /tmp/test_ca.der')
+ keystore_import_cmd = (
+ 'keytool -import '
+ '-keystore /usr/lib/jvm/java-8-oracle/jre/lib/security/cacerts '
+ '-file /tmp/test_ca.der '
+ '-deststorepass changeit '
+ '-noprompt')
+ return [
+ 'bash', '-c',
+ ('{pem_to_der_cmd} && '
+ '{keystore_import_cmd} && '
+ './run-test-client.sh {java_client_args}').format(
+ pem_to_der_cmd=pem_to_der_cmd,
+ keystore_import_cmd=keystore_import_cmd,
+ java_client_args=' '.join(args))
+ ]
+
+ def global_env(self):
+ # 1) Enable grpclb
+ # 2) Enable verbose logging
+ return {
+ 'JAVA_OPTS': (
+ '-Dio.grpc.internal.DnsNameResolverProvider.enable_grpclb=true '
+ '-Djava.util.logging.config.file=/var/local/grpc_java_logging/logconf.txt'
+ )
+ }
+
+ def __str__(self):
+ return 'java'
+
+
+class GoLanguage:
+
+ def __init__(self):
+ self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ # Copy the test CA file into the path that
+ # the Go runtime in the docker container will use, so
+ # that Go's GoogleDefaultCredentials can use it.
+ # See https://golang.org/src/crypto/x509/root_linux.go.
+ return [
+ 'bash', '-c',
+ ('cp /external_mount/src/core/tsi/test_creds/ca.pem '
+ '/etc/ssl/certs/ca-certificates.crt && '
+ '/go/bin/client {go_client_args}').format(
+ go_client_args=' '.join(args))
+ ]
+
+ def global_env(self):
+ return {
+ 'GRPC_GO_LOG_VERBOSITY_LEVEL': '3',
+ 'GRPC_GO_LOG_SEVERITY_LEVEL': 'INFO'
+ }
+
+ def __str__(self):
+ return 'go'
+
+
+_LANGUAGES = {
+ 'c++': CXXLanguage(),
+ 'go': GoLanguage(),
+ 'java': JavaLanguage(),
+}
+
+
+def docker_run_cmdline(cmdline, image, docker_args, cwd, environ=None):
+ """Wraps given cmdline array to create 'docker run' cmdline from it."""
+ # turn environ into -e docker args
+ docker_cmdline = 'docker run -i --rm=true'.split()
+ if environ:
+ for k, v in environ.items():
+ docker_cmdline += ['-e', '%s=%s' % (k, v)]
+ return docker_cmdline + ['-w', cwd] + docker_args + [image] + cmdline
+
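+# For example (hypothetical arguments),
+#   docker_run_cmdline(['bin/server'], image='img:tag', cwd='/work',
+#                      docker_args=['--net=host'], environ={'FOO': 'bar'})
+# produces the argv:
+#   docker run -i --rm=true -e FOO=bar -w /work --net=host img:tag bin/server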
+
+def _job_kill_handler(job):
+ assert job._spec.container_name
+ dockerjob.docker_kill(job._spec.container_name)
+
+
+def transport_security_to_args(transport_security):
+ args = []
+ if transport_security == 'tls':
+ args += ['--use_tls=true']
+ elif transport_security == 'alts':
+ args += ['--use_tls=false', '--use_alts=true']
+ elif transport_security == 'insecure':
+ args += ['--use_tls=false']
+ elif transport_security == 'google_default_credentials':
+ args += ['--custom_credentials_type=google_default_credentials']
+ else:
+ print('Invalid transport security option.')
+ sys.exit(1)
+ return args
+
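+# Summary of the mapping above:
+#   'tls'      -> ['--use_tls=true']
+#   'alts'     -> ['--use_tls=false', '--use_alts=true']
+#   'insecure' -> ['--use_tls=false']
+#   'google_default_credentials'
+#              -> ['--custom_credentials_type=google_default_credentials']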
+
+def lb_client_interop_jobspec(language,
+ dns_server_ip,
+ docker_image,
+ transport_security='tls'):
+ """Runs a gRPC client under test in a docker container"""
+ interop_only_options = [
+ '--server_host=%s' % _SERVICE_NAME,
+ '--server_port=%d' % _FALLBACK_SERVER_PORT
+ ] + transport_security_to_args(transport_security)
+ # Don't set the server host override in any client;
+ # Go and Java default to no override.
+ # We're using a DNS server so there's no need.
+ if language.safename == 'c++':
+ interop_only_options += ['--server_host_override=""']
+ # Don't set --use_test_ca; we're configuring
+ # clients to use test CA's via alternate means.
+ interop_only_options += ['--use_test_ca=false']
+ client_args = language.client_cmd(interop_only_options)
+ container_name = dockerjob.random_name('lb_interop_client_%s' %
+ language.safename)
+ docker_cmdline = docker_run_cmdline(
+ client_args,
+ environ=language.global_env(),
+ image=docker_image,
+ cwd=language.client_cwd,
+ docker_args=[
+ '--dns=%s' % dns_server_ip,
+ '--net=host',
+ '--name=%s' % container_name,
+ '-v',
+ '{grpc_grpc_root_dir}:/external_mount:ro'.format(
+ grpc_grpc_root_dir=ROOT),
+ ])
+ jobset.message('IDLE',
+ 'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
+ do_newline=True)
+ test_job = jobset.JobSpec(cmdline=docker_cmdline,
+ shortname=('lb_interop_client:%s' % language),
+ timeout_seconds=_TEST_TIMEOUT,
+ kill_handler=_job_kill_handler)
+ test_job.container_name = container_name
+ return test_job
+
+
+def fallback_server_jobspec(transport_security, shortname):
+ """Create jobspec for running a fallback server"""
+ cmdline = [
+ 'bin/server',
+ '--port=%d' % _FALLBACK_SERVER_PORT,
+ ] + transport_security_to_args(transport_security)
+ return grpc_server_in_docker_jobspec(server_cmdline=cmdline,
+ shortname=shortname)
+
+
+def backend_server_jobspec(transport_security, shortname):
+ """Create jobspec for running a backend server"""
+ cmdline = [
+ 'bin/server',
+ '--port=%d' % _BACKEND_SERVER_PORT,
+ ] + transport_security_to_args(transport_security)
+ return grpc_server_in_docker_jobspec(server_cmdline=cmdline,
+ shortname=shortname)
+
+
+def grpclb_jobspec(transport_security, short_stream, backend_addrs, shortname):
+ """Create jobspec for running a balancer server"""
+ cmdline = [
+ 'bin/fake_grpclb',
+ '--backend_addrs=%s' % ','.join(backend_addrs),
+ '--port=%d' % _BALANCER_SERVER_PORT,
+ '--short_stream=%s' % short_stream,
+ '--service_name=%s' % _SERVICE_NAME,
+ ] + transport_security_to_args(transport_security)
+ return grpc_server_in_docker_jobspec(server_cmdline=cmdline,
+ shortname=shortname)
+
+
+def grpc_server_in_docker_jobspec(server_cmdline, shortname):
+ container_name = dockerjob.random_name(shortname)
+ environ = {
+ 'GRPC_GO_LOG_VERBOSITY_LEVEL': '3',
+        'GRPC_GO_LOG_SEVERITY_LEVEL': 'INFO',
+ }
+ docker_cmdline = docker_run_cmdline(
+ server_cmdline,
+ cwd='/go',
+ image=docker_images.get(_FAKE_SERVERS_SAFENAME),
+ environ=environ,
+ docker_args=['--name=%s' % container_name])
+ jobset.message('IDLE',
+ 'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
+ do_newline=True)
+ server_job = jobset.JobSpec(cmdline=docker_cmdline,
+ shortname=shortname,
+ timeout_seconds=30 * 60)
+ server_job.container_name = container_name
+ return server_job
+
+
+def dns_server_in_docker_jobspec(grpclb_ips, fallback_ips, shortname,
+ cause_no_error_no_data_for_balancer_a_record):
+ container_name = dockerjob.random_name(shortname)
+ run_dns_server_cmdline = [
+ 'python',
+ 'test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py',
+ '--grpclb_ips=%s' % ','.join(grpclb_ips),
+ '--fallback_ips=%s' % ','.join(fallback_ips),
+ ]
+ if cause_no_error_no_data_for_balancer_a_record:
+ run_dns_server_cmdline.append(
+ '--cause_no_error_no_data_for_balancer_a_record')
+ docker_cmdline = docker_run_cmdline(
+ run_dns_server_cmdline,
+ cwd='/var/local/git/grpc',
+ image=docker_images.get(_FAKE_SERVERS_SAFENAME),
+ docker_args=['--name=%s' % container_name])
+ jobset.message('IDLE',
+ 'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
+ do_newline=True)
+ server_job = jobset.JobSpec(cmdline=docker_cmdline,
+ shortname=shortname,
+ timeout_seconds=30 * 60)
+ server_job.container_name = container_name
+ return server_job
+
+
+def build_interop_image_jobspec(lang_safename, basename_prefix='grpc_interop'):
+ """Creates jobspec for building interop docker image for a language"""
+ tag = '%s_%s:%s' % (basename_prefix, lang_safename, uuid.uuid4())
+ env = {
+ 'INTEROP_IMAGE': tag,
+ 'BASE_NAME': '%s_%s' % (basename_prefix, lang_safename),
+ }
+ build_job = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
+ environ=env,
+ shortname='build_docker_%s' % lang_safename,
+ timeout_seconds=30 * 60)
+ build_job.tag = tag
+ return build_job
+
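+# For a hypothetical safename 'go' this yields an image tag like
+# 'grpc_interop_go:<uuid4>', exported to build_interop_image.sh via the
+# INTEROP_IMAGE and BASE_NAME environment variables.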
+
+argp = argparse.ArgumentParser(description='Run interop tests.')
+argp.add_argument('-l',
+ '--language',
+ choices=['all'] + sorted(_LANGUAGES),
+ nargs='+',
+ default=['all'],
+ help='Clients to run.')
+argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
+argp.add_argument('-s',
+ '--scenarios_file',
+ default=None,
+ type=str,
+ help='File containing test scenarios as JSON configs.')
+argp.add_argument(
+ '-n',
+ '--scenario_name',
+ default=None,
+ type=str,
+ help=(
+ 'Useful for manual runs: specify the name of '
+ 'the scenario to run from scenarios_file. Run all scenarios if unset.'))
+argp.add_argument('--cxx_image_tag',
+ default=None,
+ type=str,
+                  help=('Setting this skips the client docker image '
+                        'build step and runs the client from the named '
+                        'image. Only supports running one client language.'))
+argp.add_argument('--go_image_tag',
+ default=None,
+ type=str,
+                  help=('Setting this skips the client docker image build '
+                        'step and runs the client from the named image. Only '
+                        'supports running one client language.'))
+argp.add_argument('--java_image_tag',
+ default=None,
+ type=str,
+                  help=('Setting this skips the client docker image build '
+                        'step and runs the client from the named image. Only '
+                        'supports running one client language.'))
+argp.add_argument(
+ '--servers_image_tag',
+ default=None,
+ type=str,
+ help=('Setting this skips the fake servers docker image '
+ 'build step and runs the servers from the named image.'))
+argp.add_argument('--no_skips',
+ default=False,
+ type=bool,
+ nargs='?',
+ const=True,
+ help=('Useful for manual runs. Setting this overrides test '
+ '"skips" configured in test scenarios.'))
+argp.add_argument('--verbose',
+ default=False,
+ type=bool,
+ nargs='?',
+ const=True,
+ help='Increase logging.')
+args = argp.parse_args()
+
+docker_images = {}
+
+build_jobs = []
+if len(args.language) and args.language[0] == 'all':
+ languages = _LANGUAGES.keys()
+else:
+ languages = args.language
+for lang_name in languages:
+ l = _LANGUAGES[lang_name]
+ # First check if a pre-built image was supplied, and avoid
+ # rebuilding the particular docker image if so.
+ if lang_name == 'c++' and args.cxx_image_tag:
+ docker_images[str(l.safename)] = args.cxx_image_tag
+ elif lang_name == 'go' and args.go_image_tag:
+ docker_images[str(l.safename)] = args.go_image_tag
+ elif lang_name == 'java' and args.java_image_tag:
+ docker_images[str(l.safename)] = args.java_image_tag
+ else:
+ # Build the test client in docker and save the fully
+ # built image.
+ job = build_interop_image_jobspec(l.safename)
+ build_jobs.append(job)
+ docker_images[str(l.safename)] = job.tag
+
+# First check if a pre-built image was supplied.
+if args.servers_image_tag:
+ docker_images[_FAKE_SERVERS_SAFENAME] = args.servers_image_tag
+else:
+ # Build the test servers in docker and save the fully
+ # built image.
+ job = build_interop_image_jobspec(_FAKE_SERVERS_SAFENAME,
+ basename_prefix='lb_interop')
+ build_jobs.append(job)
+ docker_images[_FAKE_SERVERS_SAFENAME] = job.tag
+
+if build_jobs:
+ jobset.message('START', 'Building interop docker images.', do_newline=True)
+ print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+ num_failures, _ = jobset.run(build_jobs,
+ newline_on_success=True,
+ maxjobs=args.jobs)
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'All docker images built successfully.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED',
+ 'Failed to build interop docker images.',
+ do_newline=True)
+ sys.exit(1)
+
+
+def wait_until_dns_server_is_up(dns_server_ip):
+ """Probes the DNS server until it's running and safe for tests."""
+ for i in range(0, 30):
+ print('Health check: attempt to connect to DNS server over TCP.')
+ tcp_connect_subprocess = subprocess.Popen([
+ os.path.join(os.getcwd(), 'test/cpp/naming/utils/tcp_connect.py'),
+ '--server_host', dns_server_ip, '--server_port',
+ str(53), '--timeout',
+ str(1)
+ ])
+ tcp_connect_subprocess.communicate()
+ if tcp_connect_subprocess.returncode == 0:
+ print(('Health check: attempt to make an A-record '
+ 'query to DNS server.'))
+ dns_resolver_subprocess = subprocess.Popen([
+ os.path.join(os.getcwd(),
+ 'test/cpp/naming/utils/dns_resolver.py'),
+ '--qname',
+ ('health-check-local-dns-server-is-alive.'
+ 'resolver-tests.grpctestingexp'), '--server_host',
+ dns_server_ip, '--server_port',
+ str(53)
+ ],
+ stdout=subprocess.PIPE)
+ dns_resolver_stdout, _ = dns_resolver_subprocess.communicate()
+ if dns_resolver_subprocess.returncode == 0:
+ if '123.123.123.123' in dns_resolver_stdout:
+ print(('DNS server is up! '
+ 'Successfully reached it over UDP and TCP.'))
+ return
+ time.sleep(0.1)
+    raise Exception(('Failed to reach DNS server over TCP and/or UDP. '
+                     'Exiting without running tests.'))
+
+
+def shortname(shortname_prefix, shortname, index):
+ return '%s_%s_%d' % (shortname_prefix, shortname, index)
+
+
+def run_one_scenario(scenario_config):
+ jobset.message('START', 'Run scenario: %s' % scenario_config['name'])
+ server_jobs = {}
+ server_addresses = {}
+ suppress_server_logs = True
+ try:
+ backend_addrs = []
+ fallback_ips = []
+ grpclb_ips = []
+ shortname_prefix = scenario_config['name']
+ # Start backends
+ for i in xrange(len(scenario_config['backend_configs'])):
+ backend_config = scenario_config['backend_configs'][i]
+ backend_shortname = shortname(shortname_prefix, 'backend_server', i)
+ backend_spec = backend_server_jobspec(
+ backend_config['transport_sec'], backend_shortname)
+ backend_job = dockerjob.DockerJob(backend_spec)
+ server_jobs[backend_shortname] = backend_job
+ backend_addrs.append(
+ '%s:%d' % (backend_job.ip_address(), _BACKEND_SERVER_PORT))
+ # Start fallbacks
+ for i in xrange(len(scenario_config['fallback_configs'])):
+ fallback_config = scenario_config['fallback_configs'][i]
+ fallback_shortname = shortname(shortname_prefix, 'fallback_server',
+ i)
+ fallback_spec = fallback_server_jobspec(
+ fallback_config['transport_sec'], fallback_shortname)
+ fallback_job = dockerjob.DockerJob(fallback_spec)
+ server_jobs[fallback_shortname] = fallback_job
+ fallback_ips.append(fallback_job.ip_address())
+ # Start balancers
+ for i in xrange(len(scenario_config['balancer_configs'])):
+ balancer_config = scenario_config['balancer_configs'][i]
+ grpclb_shortname = shortname(shortname_prefix, 'grpclb_server', i)
+ grpclb_spec = grpclb_jobspec(balancer_config['transport_sec'],
+ balancer_config['short_stream'],
+ backend_addrs, grpclb_shortname)
+ grpclb_job = dockerjob.DockerJob(grpclb_spec)
+ server_jobs[grpclb_shortname] = grpclb_job
+ grpclb_ips.append(grpclb_job.ip_address())
+ # Start DNS server
+ dns_server_shortname = shortname(shortname_prefix, 'dns_server', 0)
+ dns_server_spec = dns_server_in_docker_jobspec(
+ grpclb_ips, fallback_ips, dns_server_shortname,
+ scenario_config['cause_no_error_no_data_for_balancer_a_record'])
+ dns_server_job = dockerjob.DockerJob(dns_server_spec)
+ server_jobs[dns_server_shortname] = dns_server_job
+ # Get the IP address of the docker container running the DNS server.
+ # The DNS server is running on port 53 of that IP address. Note we will
+ # point the DNS resolvers of grpc clients under test to our controlled
+        # DNS server by effectively modifying the /etc/resolv.conf "nameserver"
+ # lists of their docker containers.
+ dns_server_ip = dns_server_job.ip_address()
+ wait_until_dns_server_is_up(dns_server_ip)
+ # Run clients
+ jobs = []
+ for lang_name in languages:
+ # Skip languages that are known to not currently
+ # work for this test.
+ if not args.no_skips and lang_name in scenario_config.get(
+ 'skip_langs', []):
+ jobset.message(
+ 'IDLE', 'Skipping scenario: %s for language: %s\n' %
+ (scenario_config['name'], lang_name))
+ continue
+ lang = _LANGUAGES[lang_name]
+ test_job = lb_client_interop_jobspec(
+ lang,
+ dns_server_ip,
+ docker_image=docker_images.get(lang.safename),
+ transport_security=scenario_config['transport_sec'])
+ jobs.append(test_job)
+ jobset.message(
+ 'IDLE', 'Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+ num_failures, resultset = jobset.run(jobs,
+ newline_on_success=True,
+ maxjobs=args.jobs)
+ report_utils.render_junit_xml_report(resultset, 'sponge_log.xml')
+ if num_failures:
+ suppress_server_logs = False
+ jobset.message('FAILED',
+ 'Scenario: %s. Some tests failed' %
+ scenario_config['name'],
+ do_newline=True)
+ else:
+ jobset.message('SUCCESS',
+ 'Scenario: %s. All tests passed' %
+ scenario_config['name'],
+ do_newline=True)
+ return num_failures
+ finally:
+ # Check if servers are still running.
+ for server, job in server_jobs.items():
+ if not job.is_running():
+ print('Server "%s" has exited prematurely.' % server)
+ suppress_failure = suppress_server_logs and not args.verbose
+ dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)],
+ suppress_failure=suppress_failure)
+
+
+num_failures = 0
+with open(args.scenarios_file, 'r') as scenarios_input:
+ all_scenarios = json.loads(scenarios_input.read())
+ for scenario in all_scenarios:
+ if args.scenario_name:
+ if args.scenario_name != scenario['name']:
+ jobset.message('IDLE',
+ 'Skipping scenario: %s' % scenario['name'])
+ continue
+ num_failures += run_one_scenario(scenario)
+if num_failures == 0:
+ sys.exit(0)
+else:
+ sys.exit(1)
diff --git a/grpc/tools/run_tests/run_interop_tests.py b/grpc/tools/run_tests/run_interop_tests.py
new file mode 100755
index 00000000..d183c5b4
--- /dev/null
+++ b/grpc/tools/run_tests/run_interop_tests.py
@@ -0,0 +1,1719 @@
+#!/usr/bin/env python
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Run interop (cross-language) tests in parallel."""
+
+from __future__ import print_function
+
+import argparse
+import atexit
+import itertools
+import json
+import multiprocessing
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import time
+import uuid
+import six
+import traceback
+
+import python_utils.dockerjob as dockerjob
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+# It's OK if this import fails; it is only needed to upload results to BQ.
+try:
+ from python_utils.upload_test_results import upload_interop_results_to_bq
+except ImportError as e:
+ print(e)
+
+# Docker (run with -i) doesn't restore the terminal state after itself, so
+# re-enable 'stty echo' on exit.
+atexit.register(lambda: subprocess.call(['stty', 'echo']))
+
+ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+os.chdir(ROOT)
+
+_DEFAULT_SERVER_PORT = 8080
+
+_SKIP_CLIENT_COMPRESSION = [
+ 'client_compressed_unary', 'client_compressed_streaming'
+]
+
+_SKIP_SERVER_COMPRESSION = [
+ 'server_compressed_unary', 'server_compressed_streaming'
+]
+
+_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
+
+_SKIP_ADVANCED = [
+ 'status_code_and_message', 'custom_metadata', 'unimplemented_method',
+ 'unimplemented_service'
+]
+
+_SKIP_SPECIAL_STATUS_MESSAGE = ['special_status_message']
+
+_GOOGLE_DEFAULT_CREDS_TEST_CASE = 'google_default_credentials'
+
+_SKIP_GOOGLE_DEFAULT_CREDS = [
+ _GOOGLE_DEFAULT_CREDS_TEST_CASE,
+]
+
+_COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE = 'compute_engine_channel_credentials'
+
+_SKIP_COMPUTE_ENGINE_CHANNEL_CREDS = [
+ _COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE,
+]
+
+_TEST_TIMEOUT = 3 * 60
+
+# disable this test on core-based languages,
+# see https://github.com/grpc/grpc/issues/9779
+_SKIP_DATA_FRAME_PADDING = ['data_frame_padding']
+
+# report suffix "sponge_log.xml" is important for reports to get picked up by internal CI
+_DOCKER_BUILD_XML_REPORT = 'interop_docker_build/sponge_log.xml'
+_TESTS_XML_REPORT = 'interop_test/sponge_log.xml'
+
+
+class CXXLanguage:
+
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.http2_cwd = None
+ self.safename = 'cxx'
+
+ def client_cmd(self, args):
+ return ['bins/opt/interop_client'] + args
+
+ def client_cmd_http2interop(self, args):
+ return ['bins/opt/http2_client'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return ['bins/opt/interop_server'] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_SPECIAL_STATUS_MESSAGE + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return []
+
+ def __str__(self):
+ return 'c++'
+
+
+class CSharpLanguage:
+
+ def __init__(self):
+ self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
+ self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'csharp'
+
+
+class CSharpCoreCLRLanguage:
+
+ def __init__(self):
+ self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp2.1'
+ self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp2.1'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'csharpcoreclr'
+
+
+class AspNetCoreLanguage:
+
+ def __init__(self):
+ self.client_cwd = '../grpc-dotnet/testassets/InteropTestsClient/bin/Debug/netcoreapp3.0'
+ self.server_cwd = '../grpc-dotnet/testassets/InteropTestsWebsite/bin/Debug/netcoreapp3.0'
+ self.safename = str(self)
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def client_cmd(self, args):
+ return ['dotnet', 'exec', 'InteropTestsClient.dll'] + args
+
+ def server_cmd(self, args):
+ return ['dotnet', 'exec', 'InteropTestsWebsite.dll'] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return []
+
+ def __str__(self):
+ return 'aspnetcore'
+
+
+class DartLanguage:
+
+ def __init__(self):
+ self.client_cwd = '../grpc-dart/interop'
+ self.server_cwd = '../grpc-dart/interop'
+ self.http2_cwd = '../grpc-dart/interop'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['dart', 'bin/client.dart'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return ['dart', 'bin/server.dart'] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + \
+ _SKIP_SPECIAL_STATUS_MESSAGE + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION + _SKIP_SPECIAL_STATUS_MESSAGE
+
+ def __str__(self):
+ return 'dart'
+
+
+class JavaLanguage:
+
+ def __init__(self):
+ self.client_cwd = '../grpc-java'
+ self.server_cwd = '../grpc-java'
+ self.http2_cwd = '../grpc-java'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['./run-test-client.sh'] + args
+
+ def client_cmd_http2interop(self, args):
+ return [
+ './interop-testing/build/install/grpc-interop-testing/bin/http2-client'
+ ] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return ['./run-test-server.sh'] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return []
+
+ def unimplemented_test_cases_server(self):
+ # Does not support CompressedRequest feature.
+ # Only supports CompressedResponse feature for unary.
+ return _SKIP_CLIENT_COMPRESSION + ['server_compressed_streaming']
+
+ def __str__(self):
+ return 'java'
+
+
+class JavaOkHttpClient:
+
+ def __init__(self):
+ self.client_cwd = '../grpc-java'
+ self.safename = 'java'
+
+ def client_cmd(self, args):
+ return ['./run-test-client.sh', '--use_okhttp=true'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_DATA_FRAME_PADDING
+
+ def __str__(self):
+ return 'javaokhttp'
+
+
+class GoLanguage:
+
+ def __init__(self):
+ # TODO: this relies on running inside docker
+ self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
+ self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
+ self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['go', 'run', 'client.go'] + args
+
+ def client_cmd_http2interop(self, args):
+ return ['go', 'run', 'negative_http2_client.go'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return ['go', 'run', 'server.go'] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'go'
+
+
+class Http2Server:
+ """Represents the HTTP/2 Interop Test server
+
+ This pretends to be a language in order to be built and run, but really it
+ isn't.
+ """
+
+ def __init__(self):
+ self.server_cwd = None
+ self.safename = str(self)
+
+ def server_cmd(self, args):
+ return ['python test/http2_test/http2_test_server.py']
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _TEST_CASES + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_SPECIAL_STATUS_MESSAGE + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _TEST_CASES
+
+ def __str__(self):
+ return 'http2'
+
+
+class Http2Client:
+ """Represents the HTTP/2 Interop Test
+
+ This pretends to be a language in order to be built and run, but really it
+ isn't.
+ """
+
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _TEST_CASES + \
+ _SKIP_SPECIAL_STATUS_MESSAGE + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _TEST_CASES
+
+ def __str__(self):
+ return 'http2'
+
+
+class NodeLanguage:
+
+ def __init__(self):
+ self.client_cwd = '../grpc-node'
+ self.server_cwd = '../grpc-node'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return [
+ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+ 'node', '--require', './test/fixtures/native_native',
+ 'test/interop/interop_client.js'
+ ] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return [
+ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+ 'node', '--require', './test/fixtures/native_native',
+ 'test/interop/interop_server.js'
+ ] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'node'
+
+
+class NodePureJSLanguage:
+
+ def __init__(self):
+ self.client_cwd = '../grpc-node'
+ self.server_cwd = '../grpc-node'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return [
+ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+ 'node', '--require', './test/fixtures/js_js',
+ 'test/interop/interop_client.js'
+ ] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return []
+
+ def __str__(self):
+ return 'nodepurejs'
+
+
+class PHPLanguage:
+
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['src/php/bin/interop_client.sh'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_SPECIAL_STATUS_MESSAGE + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return []
+
+ def __str__(self):
+ return 'php'
+
+
+class PHP7Language:
+
+ def __init__(self):
+ self.client_cwd = None
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return ['src/php/bin/interop_client.sh'] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_SPECIAL_STATUS_MESSAGE + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return []
+
+ def __str__(self):
+ return 'php7'
+
+
+class ObjcLanguage:
+
+ def __init__(self):
+ self.client_cwd = 'src/objective-c/tests'
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+        # From args, extract the server port and craft the xcodebuild command.
+ for arg in args:
+ port = re.search('--server_port=(\d+)', arg)
+ if port:
+ portnum = port.group(1)
+ cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test' % portnum
+ return [cmdline]
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+        # The ObjC test runs all cases with the same command and ignores the
+        # testcase cmdline argument. Here we return all but one test case as
+        # unimplemented, and depend on the ObjC test's behavior of running all
+        # cases even when we tell it to run just one.
+ return _TEST_CASES[1:] + \
+ _SKIP_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_SPECIAL_STATUS_MESSAGE + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'objc'
+
+
+class RubyLanguage:
+
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return [
+ 'tools/run_tests/interop/with_rvm.sh', 'ruby',
+ 'src/ruby/pb/test/client.rb'
+ ] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return [
+ 'tools/run_tests/interop/with_rvm.sh', 'ruby',
+ 'src/ruby/pb/test/server.rb'
+ ] + args
+
+ def global_env(self):
+ return {}
+
+ def unimplemented_test_cases(self):
+ return _SKIP_SERVER_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_SPECIAL_STATUS_MESSAGE + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'ruby'
+
+
+class PythonLanguage:
+
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.http2_cwd = None
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return [
+ 'py37_native/bin/python', 'src/python/grpcio_tests/setup.py',
+ 'run_interop', '--client', '--args="{}"'.format(' '.join(args))
+ ]
+
+ def client_cmd_http2interop(self, args):
+ return [
+ 'py37_native/bin/python',
+ 'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
+ ] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return [
+ 'py37_native/bin/python', 'src/python/grpcio_tests/setup.py',
+ 'run_interop', '--server', '--args="{}"'.format(' '.join(args))
+ ]
+
+ def global_env(self):
+ return {
+ 'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
+ 'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)
+ }
+
+ def unimplemented_test_cases(self):
+ return _SKIP_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _SKIP_GOOGLE_DEFAULT_CREDS + \
+ _SKIP_COMPUTE_ENGINE_CHANNEL_CREDS
+
+ def unimplemented_test_cases_server(self):
+ return _SKIP_COMPRESSION
+
+ def __str__(self):
+ return 'python'
+
+
+class PythonAsyncIOLanguage:
+
+ def __init__(self):
+ self.client_cwd = None
+ self.server_cwd = None
+ self.http2_cwd = None
+ self.safename = str(self)
+
+ def client_cmd(self, args):
+ return [
+ 'py37_native/bin/python', 'src/python/grpcio_tests/setup.py',
+ 'run_interop', '--use-asyncio', '--client',
+ '--args="{}"'.format(' '.join(args))
+ ]
+
+ def client_cmd_http2interop(self, args):
+ return [
+ 'py37_native/bin/python',
+ 'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
+ ] + args
+
+ def cloud_to_prod_env(self):
+ return {}
+
+ def server_cmd(self, args):
+ return [
+ 'py37_native/bin/python', 'src/python/grpcio_tests/setup.py',
+ 'run_interop', '--use-asyncio', '--server',
+ '--args="{}"'.format(' '.join(args))
+ ]
+
+ def global_env(self):
+ return {
+ 'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
+ 'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)
+ }
+
+ def unimplemented_test_cases(self):
+ # TODO(https://github.com/grpc/grpc/issues/21707)
+ return _SKIP_COMPRESSION + \
+ _SKIP_DATA_FRAME_PADDING + \
+ _AUTH_TEST_CASES + \
+ ['timeout_on_sleeping_server']
+
+ def unimplemented_test_cases_server(self):
+ # TODO(https://github.com/grpc/grpc/issues/21749)
+ return _TEST_CASES + \
+ _AUTH_TEST_CASES + \
+ _HTTP2_TEST_CASES + \
+ _HTTP2_SERVER_TEST_CASES
+
+ def __str__(self):
+ return 'pythonasyncio'
+
+
+_LANGUAGES = {
+ 'c++': CXXLanguage(),
+ 'csharp': CSharpLanguage(),
+ 'csharpcoreclr': CSharpCoreCLRLanguage(),
+ 'aspnetcore': AspNetCoreLanguage(),
+ 'dart': DartLanguage(),
+ 'go': GoLanguage(),
+ 'java': JavaLanguage(),
+ 'javaokhttp': JavaOkHttpClient(),
+ 'node': NodeLanguage(),
+ 'nodepurejs': NodePureJSLanguage(),
+ 'php': PHPLanguage(),
+ 'php7': PHP7Language(),
+ 'objc': ObjcLanguage(),
+ 'ruby': RubyLanguage(),
+ 'python': PythonLanguage(),
+ 'pythonasyncio': PythonAsyncIOLanguage(),
+}
+
+# languages supported as cloud_to_cloud servers
+_SERVERS = [
+ 'c++', 'node', 'csharp', 'csharpcoreclr', 'aspnetcore', 'java', 'go',
+ 'ruby', 'python', 'dart', 'pythonasyncio'
+]
+
+_TEST_CASES = [
+ 'large_unary', 'empty_unary', 'ping_pong', 'empty_stream',
+ 'client_streaming', 'server_streaming', 'cancel_after_begin',
+ 'cancel_after_first_response', 'timeout_on_sleeping_server',
+ 'custom_metadata', 'status_code_and_message', 'unimplemented_method',
+ 'client_compressed_unary', 'server_compressed_unary',
+ 'client_compressed_streaming', 'server_compressed_streaming',
+ 'unimplemented_service', 'special_status_message'
+]
+
+_AUTH_TEST_CASES = [
+ 'compute_engine_creds',
+ 'jwt_token_creds',
+ 'oauth2_auth_token',
+ 'per_rpc_creds',
+ _GOOGLE_DEFAULT_CREDS_TEST_CASE,
+ _COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE,
+]
+
+_HTTP2_TEST_CASES = ['tls', 'framing']
+
+_HTTP2_SERVER_TEST_CASES = [
+ 'rst_after_header', 'rst_after_data', 'rst_during_data', 'goaway', 'ping',
+ 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test'
+]
+
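+# Maps each http2 server test case that is driven by an ordinary gRPC client
+# (rather than a specialized http2 client) to the gRPC interop test case that
+# client should run against the misbehaving server.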
+_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = {
+ 'data_frame_padding': 'large_unary',
+ 'no_df_padding_sanity_test': 'large_unary'
+}
+
+_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = (
+    _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys())
+
+_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [
+ 'java', 'go', 'python', 'c++'
+]
+
+_LANGUAGES_FOR_ALTS_TEST_CASES = ['java', 'go', 'c++']
+
+_SERVERS_FOR_ALTS_TEST_CASES = ['java', 'go', 'c++']
+
+_TRANSPORT_SECURITY_OPTIONS = ['tls', 'alts', 'insecure']
+
+_CUSTOM_CREDENTIALS_TYPE_OPTIONS = [
+ 'tls', 'google_default_credentials', 'compute_engine_channel_creds'
+]
+
+DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
+
+
+def docker_run_cmdline(cmdline, image, docker_args=None, cwd=None, environ=None):
+ """Wraps given cmdline array to create 'docker run' cmdline from it."""
+ docker_cmdline = ['docker', 'run', '-i', '--rm=true']
+
+ # turn environ into -e docker args
+ if environ:
+ for k, v in environ.items():
+ docker_cmdline += ['-e', '%s=%s' % (k, v)]
+
+ # set working directory
+ workdir = DOCKER_WORKDIR_ROOT
+ if cwd:
+ workdir = os.path.join(workdir, cwd)
+ docker_cmdline += ['-w', workdir]
+
+ docker_cmdline += docker_args + [image] + cmdline
+ return docker_cmdline
+
+
+def manual_cmdline(docker_cmdline, docker_image):
+ """Returns docker cmdline adjusted for manual invocation."""
+ print_cmdline = []
+ for item in docker_cmdline:
+ if item.startswith('--name='):
+ continue
+ if item == docker_image:
+ item = "$docker_image"
+ item = item.replace('"', '\\"')
+ # add quotes when necessary
+ if any(character.isspace() for character in item):
+ item = "\"%s\"" % item
+ print_cmdline.append(item)
+ return ' '.join(print_cmdline)
+
+
+def write_cmdlog_maybe(cmdlog, filename):
+ """Returns docker cmdline adjusted for manual invocation."""
+ if cmdlog:
+ with open(filename, 'w') as logfile:
+ logfile.write('#!/bin/bash\n')
+ logfile.write('# DO NOT MODIFY\n')
+ logfile.write(
+ '# This file is generated by run_interop_tests.py/create_testcases.sh\n'
+ )
+ logfile.writelines("%s\n" % line for line in cmdlog)
+ print('Command log written to file %s' % filename)
+
+
+def bash_cmdline(cmdline):
+ """Creates bash -c cmdline from args list."""
+    # Wrap the command in an explicit bash invocation:
+    # * makes error messages clearer if executables are missing
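+    # e.g. (illustrative) bash_cmdline(['echo', 'hi']) == ['bash', '-c', 'echo hi']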
+ return ['bash', '-c', ' '.join(cmdline)]
+
+
+def compute_engine_creds_required(language, test_case):
+ """Returns True if given test requires access to compute engine creds."""
+ language = str(language)
+ if test_case == 'compute_engine_creds':
+ return True
+ if test_case == 'oauth2_auth_token' and language == 'c++':
+ # C++ oauth2 test uses GCE creds because C++ only supports JWT
+ return True
+ return False
+
+
+def auth_options(language, test_case, google_default_creds_use_key_file,
+ service_account_key_file, default_service_account):
+ """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
+
+ language = str(language)
+ cmdargs = []
+ env = {}
+
+ oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
+ key_file_arg = '--service_account_key_file=%s' % service_account_key_file
+ default_account_arg = '--default_service_account=%s' % default_service_account
+
+ if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
+ if language in [
+ 'csharp', 'csharpcoreclr', 'aspnetcore', 'node', 'php', 'php7',
+ 'python', 'ruby', 'nodepurejs'
+ ]:
+ env['GOOGLE_APPLICATION_CREDENTIALS'] = service_account_key_file
+ else:
+ cmdargs += [key_file_arg]
+
+ if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
+ cmdargs += [oauth_scope_arg]
+
+ if test_case == 'oauth2_auth_token' and language == 'c++':
+ # C++ oauth2 test uses GCE creds and thus needs to know the default account
+ cmdargs += [default_account_arg]
+
+ if test_case == 'compute_engine_creds':
+ cmdargs += [oauth_scope_arg, default_account_arg]
+
+ if test_case == _GOOGLE_DEFAULT_CREDS_TEST_CASE:
+ if google_default_creds_use_key_file:
+ env['GOOGLE_APPLICATION_CREDENTIALS'] = service_account_key_file
+ cmdargs += [default_account_arg]
+
+ if test_case == _COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE:
+ cmdargs += [default_account_arg]
+
+ return (cmdargs, env)
+
+
+def _job_kill_handler(job):
+ if job._spec.container_name:
+ dockerjob.docker_kill(job._spec.container_name)
+ # When the job times out and we decide to kill it,
+    # we need to wait a moment before restarting the job
+    # to prevent a "container name already in use" error.
+    # TODO(jtattermusch): figure out a cleaner way to do this.
+ time.sleep(2)
+
+
+def cloud_to_prod_jobspec(language,
+ test_case,
+ server_host_nickname,
+ server_host,
+ google_default_creds_use_key_file,
+ docker_image=None,
+ auth=False,
+ manual_cmd_log=None,
+ service_account_key_file=None,
+ default_service_account=None,
+ transport_security='tls'):
+ """Creates jobspec for cloud-to-prod interop test"""
+ container_name = None
+ cmdargs = [
+ '--server_host=%s' % server_host, '--server_port=443',
+ '--test_case=%s' % test_case
+ ]
+ if transport_security == 'tls':
+ transport_security_options = ['--use_tls=true']
+ elif transport_security == 'google_default_credentials' and str(
+ language) in ['c++', 'go', 'java', 'javaokhttp']:
+ transport_security_options = [
+ '--custom_credentials_type=google_default_credentials'
+ ]
+ elif transport_security == 'compute_engine_channel_creds' and str(
+ language) in ['go', 'java', 'javaokhttp']:
+ transport_security_options = [
+ '--custom_credentials_type=compute_engine_channel_creds'
+ ]
+ else:
+ print(
+ 'Invalid transport security option %s in cloud_to_prod_jobspec. Lang: %s'
+            % (transport_security, str(language)))
+ sys.exit(1)
+ cmdargs = cmdargs + transport_security_options
+ environ = dict(language.cloud_to_prod_env(), **language.global_env())
+ if auth:
+ auth_cmdargs, auth_env = auth_options(
+ language, test_case, google_default_creds_use_key_file,
+ service_account_key_file, default_service_account)
+ cmdargs += auth_cmdargs
+ environ.update(auth_env)
+ cmdline = bash_cmdline(language.client_cmd(cmdargs))
+ cwd = language.client_cwd
+
+ if docker_image:
+ container_name = dockerjob.random_name('interop_client_%s' %
+ language.safename)
+ cmdline = docker_run_cmdline(
+ cmdline,
+ image=docker_image,
+ cwd=cwd,
+ environ=environ,
+ docker_args=['--net=host',
+ '--name=%s' % container_name])
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+ cwd = None
+ environ = None
+
+ suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod'
+ test_job = jobset.JobSpec(cmdline=cmdline,
+ cwd=cwd,
+ environ=environ,
+ shortname='%s:%s:%s:%s:%s' %
+ (suite_name, language, server_host_nickname,
+ test_case, transport_security),
+ timeout_seconds=_TEST_TIMEOUT,
+ flake_retries=4 if args.allow_flakes else 0,
+ timeout_retries=2 if args.allow_flakes else 0,
+ kill_handler=_job_kill_handler)
+ if docker_image:
+ test_job.container_name = container_name
+ return test_job
+
+
+def cloud_to_cloud_jobspec(language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=None,
+ transport_security='tls',
+ manual_cmd_log=None):
+ """Creates jobspec for cloud-to-cloud interop test"""
+ interop_only_options = [
+ '--server_host_override=foo.test.google.fr',
+ '--use_test_ca=true',
+ ]
+ if transport_security == 'tls':
+ interop_only_options += ['--use_tls=true']
+ elif transport_security == 'alts':
+ interop_only_options += ['--use_tls=false', '--use_alts=true']
+ elif transport_security == 'insecure':
+ interop_only_options += ['--use_tls=false']
+ else:
+ print(
+ 'Invalid transport security option %s in cloud_to_cloud_jobspec.' %
+ transport_security)
+ sys.exit(1)
+
+ client_test_case = test_case
+ if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[
+ test_case]
+ if client_test_case in language.unimplemented_test_cases():
+ print('asking client %s to run unimplemented test case %s' %
+ (repr(language), client_test_case))
+ sys.exit(1)
+
+ common_options = [
+ '--test_case=%s' % client_test_case,
+ '--server_host=%s' % server_host,
+ '--server_port=%s' % server_port,
+ ]
+
+ if test_case in _HTTP2_SERVER_TEST_CASES:
+ if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ client_options = interop_only_options + common_options
+ cmdline = bash_cmdline(language.client_cmd(client_options))
+ cwd = language.client_cwd
+ else:
+ cmdline = bash_cmdline(
+ language.client_cmd_http2interop(common_options))
+ cwd = language.http2_cwd
+ else:
+ cmdline = bash_cmdline(
+ language.client_cmd(common_options + interop_only_options))
+ cwd = language.client_cwd
+
+ environ = language.global_env()
+ if docker_image and language.safename != 'objc':
+        # we can't run the objc client in docker.
+ container_name = dockerjob.random_name('interop_client_%s' %
+ language.safename)
+ cmdline = docker_run_cmdline(
+ cmdline,
+ image=docker_image,
+ environ=environ,
+ cwd=cwd,
+ docker_args=['--net=host',
+ '--name=%s' % container_name])
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+ cwd = None
+
+ test_job = jobset.JobSpec(
+ cmdline=cmdline,
+ cwd=cwd,
+ environ=environ,
+ shortname='cloud_to_cloud:%s:%s_server:%s:%s' %
+ (language, server_name, test_case, transport_security),
+ timeout_seconds=_TEST_TIMEOUT,
+ flake_retries=4 if args.allow_flakes else 0,
+ timeout_retries=2 if args.allow_flakes else 0,
+ kill_handler=_job_kill_handler)
+ if docker_image:
+ test_job.container_name = container_name
+ return test_job
+
+
+def server_jobspec(language,
+ docker_image,
+ transport_security='tls',
+ manual_cmd_log=None):
+ """Create jobspec for running a server"""
+ container_name = dockerjob.random_name('interop_server_%s' %
+ language.safename)
+ server_cmd = ['--port=%s' % _DEFAULT_SERVER_PORT]
+ if transport_security == 'tls':
+ server_cmd += ['--use_tls=true']
+ elif transport_security == 'alts':
+ server_cmd += ['--use_tls=false', '--use_alts=true']
+ elif transport_security == 'insecure':
+ server_cmd += ['--use_tls=false']
+ else:
+ print('Invalid transport security option %s in server_jobspec.' %
+ transport_security)
+ sys.exit(1)
+ cmdline = bash_cmdline(language.server_cmd(server_cmd))
+ environ = language.global_env()
+ docker_args = ['--name=%s' % container_name]
+ if language.safename == 'http2':
+        # we are running the http2 interop server. Open the next N ports beginning
+        # with the server port. These ports are used for the http2 interop tests
+        # (one test case per port).
+ docker_args += list(
+ itertools.chain.from_iterable(
+ ('-p', str(_DEFAULT_SERVER_PORT + i))
+ for i in range(len(_HTTP2_SERVER_TEST_CASES))))
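+        # e.g. (illustrative) if _DEFAULT_SERVER_PORT were 8080, with the 8 test
+        # cases above this adds '-p 8080 -p 8081 ... -p 8087' to the docker args.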
+ # Enable docker's healthcheck mechanism.
+ # This runs a Python script inside the container every second. The script
+ # pings the http2 server to verify it is ready. The 'health-retries' flag
+ # specifies the number of consecutive failures before docker will report
+    # the container's status as 'unhealthy'. Prior to the first 'health-retries'
+ # failures or the first success, the status will be 'starting'. 'docker ps'
+ # or 'docker inspect' can be used to see the health of the container on the
+ # command line.
+ docker_args += [
+ '--health-cmd=python test/http2_test/http2_server_health_check.py '
+ '--server_host=%s --server_port=%d' %
+ ('localhost', _DEFAULT_SERVER_PORT),
+ '--health-interval=1s',
+ '--health-retries=5',
+ '--health-timeout=10s',
+ ]
+
+ else:
+ docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
+
+ docker_cmdline = docker_run_cmdline(cmdline,
+ image=docker_image,
+ cwd=language.server_cwd,
+ environ=environ,
+ docker_args=docker_args)
+ if manual_cmd_log is not None:
+ if manual_cmd_log == []:
+ manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+ docker_image)
+ manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
+ server_job = jobset.JobSpec(cmdline=docker_cmdline,
+ environ=environ,
+ shortname='interop_server_%s' % language,
+ timeout_seconds=30 * 60)
+ server_job.container_name = container_name
+ return server_job
+
+
+def build_interop_image_jobspec(language, tag=None):
+ """Creates jobspec for building interop docker image for a language"""
+ if not tag:
+ tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
+ env = {
+ 'INTEROP_IMAGE': tag,
+ 'BASE_NAME': 'grpc_interop_%s' % language.safename
+ }
+ if not args.travis:
+ env['TTY_FLAG'] = '-t'
+    # This env variable is used to get around the GitHub rate limit
+    # error when running the PHP `composer install` command.
+ host_file = '%s/.composer/auth.json' % os.environ['HOME']
+ if language.safename == 'php' and os.path.exists(host_file):
+ env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
+ '-v %s:/root/.composer/auth.json:ro' % host_file
+ build_job = jobset.JobSpec(
+ cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
+ environ=env,
+ shortname='build_docker_%s' % (language),
+ timeout_seconds=30 * 60)
+ build_job.tag = tag
+ return build_job
+
+
+def aggregate_http2_results(stdout):
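+    """Parses the JSON results blob printed by the http2 client.
+
+    Looks for a substring of the (assumed) form
+    {"cases": [{"name": ..., "passed": ..., "skipped": ...}, ...]}
+    and returns a dict with pass/fail/skip counts, or None if no such
+    blob is found. Assumes at least one case actually ran (passed or
+    failed), since 'percent' divides by their sum.
+    """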
+ match = re.search(r'\{"cases[^\]]*\]\}', stdout)
+ if not match:
+ return None
+
+ results = json.loads(match.group(0))
+ skipped = 0
+ passed = 0
+ failed = 0
+ failed_cases = []
+ for case in results['cases']:
+ if case.get('skipped', False):
+ skipped += 1
+ else:
+ if case.get('passed', False):
+ passed += 1
+ else:
+ failed += 1
+ failed_cases.append(case.get('name', "NONAME"))
+ return {
+ 'passed': passed,
+ 'failed': failed,
+ 'skipped': skipped,
+ 'failed_cases': ', '.join(failed_cases),
+ 'percent': 1.0 * passed / (passed + failed)
+ }
+
+
+# A dictionary of prod servers to test against.
+# See go/grpc-interop-tests (internal-only) for details.
+prod_servers = {
+ 'default': 'grpc-test.sandbox.googleapis.com',
+ 'gateway_v4': 'grpc-test4.sandbox.googleapis.com',
+}
+
+argp = argparse.ArgumentParser(description='Run interop tests.')
+argp.add_argument('-l',
+ '--language',
+ choices=['all'] + sorted(_LANGUAGES),
+ nargs='+',
+ default=['all'],
+                  help='Clients to run. The objc client can only be run on OSX.')
+argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
+argp.add_argument('--cloud_to_prod',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run cloud_to_prod tests.')
+argp.add_argument('--cloud_to_prod_auth',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run cloud_to_prod_auth tests.')
+argp.add_argument('--google_default_creds_use_key_file',
+ default=False,
+ action='store_const',
+ const=True,
+ help=('Whether or not we should use a key file for the '
+ 'google_default_credentials test case, e.g. by '
+ 'setting env var GOOGLE_APPLICATION_CREDENTIALS.'))
+argp.add_argument('--prod_servers',
+ choices=prod_servers.keys(),
+ default=['default'],
+ nargs='+',
+ help=('The servers to run cloud_to_prod and '
+ 'cloud_to_prod_auth tests against.'))
+argp.add_argument('-s',
+ '--server',
+ choices=['all'] + sorted(_SERVERS),
+ nargs='+',
+ help='Run cloud_to_cloud servers in a separate docker ' +
+ 'image. Servers can only be started automatically if ' +
+ '--use_docker option is enabled.',
+ default=[])
+argp.add_argument(
+ '--override_server',
+ action='append',
+ type=lambda kv: kv.split('='),
+ help=
+ 'Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
+ default=[])
+# TODO(jtattermusch): the default service_account_key_file only works when --use_docker is used.
+argp.add_argument(
+ '--service_account_key_file',
+ type=str,
+ help='The service account key file to use for some auth interop tests.',
+ default='/root/service_account/grpc-testing-ebe7c1ac7381.json')
+argp.add_argument(
+ '--default_service_account',
+ type=str,
+ help='Default GCE service account email to use for some auth interop tests.',
+ default='830293263384-compute@developer.gserviceaccount.com')
+argp.add_argument('-t',
+ '--travis',
+ default=False,
+ action='store_const',
+ const=True)
+argp.add_argument('-v',
+ '--verbose',
+ default=False,
+ action='store_const',
+ const=True)
+argp.add_argument(
+ '--use_docker',
+ default=False,
+ action='store_const',
+ const=True,
+    help='Run all the interop tests under docker. That provides ' +
+    'additional isolation and avoids the need to install ' +
+    'language-specific prerequisites. Only available on Linux.')
+argp.add_argument(
+ '--allow_flakes',
+ default=False,
+ action='store_const',
+ const=True,
+ help=
+    'Allow flaky tests to show as passing (failed tests are retried up to four times)'
+)
+argp.add_argument('--manual_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Prepare things for running interop tests manually. ' +
+ 'Preserve docker images after building them and skip '
+ 'actually running the tests. Only print commands to run by ' +
+ 'hand.')
+argp.add_argument(
+ '--http2_interop',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
+argp.add_argument(
+ '--http2_server_interop',
+ default=False,
+ action='store_const',
+ const=True,
+ help=
+    'Enable HTTP/2 server edge case testing. (Includes positive and negative tests)'
+)
+argp.add_argument('--transport_security',
+ choices=_TRANSPORT_SECURITY_OPTIONS,
+ default='tls',
+ type=str,
+ nargs='?',
+ const=True,
+ help='Which transport security mechanism to use.')
+argp.add_argument(
+ '--custom_credentials_type',
+ choices=_CUSTOM_CREDENTIALS_TYPE_OPTIONS,
+ default=_CUSTOM_CREDENTIALS_TYPE_OPTIONS,
+ nargs='+',
+ help=
+ 'Credential types to test in the cloud_to_prod setup. Default is to test with all creds types possible.'
+)
+argp.add_argument(
+ '--skip_compute_engine_creds',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Skip auth tests requiring access to compute engine credentials.')
+argp.add_argument(
+ '--internal_ci',
+ default=False,
+ action='store_const',
+ const=True,
+ help=(
+ '(Deprecated, has no effect) Put reports into subdirectories to improve '
+ 'presentation of results by Internal CI.'))
+argp.add_argument('--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
+args = argp.parse_args()
+
+servers = set(s for s in itertools.chain.from_iterable(
+ _SERVERS if x == 'all' else [x] for x in args.server))
+# ALTS servers are only available for certain languages.
+if args.transport_security == 'alts':
+ servers = servers.intersection(_SERVERS_FOR_ALTS_TEST_CASES)
+
+if args.use_docker:
+ if not args.travis:
+ print('Seen --use_docker flag, will run interop tests under docker.')
+ print('')
+ print(
+ 'IMPORTANT: The changes you are testing need to be locally committed'
+ )
+ print(
+ 'because only the committed changes in the current branch will be')
+ print('copied to the docker environment.')
+ time.sleep(5)
+
+if args.manual_run and not args.use_docker:
+ print('--manual_run is only supported with --use_docker option enabled.')
+ sys.exit(1)
+
+if not args.use_docker and servers:
+ print(
+ 'Running interop servers is only supported with --use_docker option enabled.'
+ )
+ sys.exit(1)
+
+# we want to include everything but objc in 'all'
+# because objc won't run on non-mac platforms
+all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc'])
+languages = set(_LANGUAGES[l] for l in itertools.chain.from_iterable(
+ all_but_objc if x == 'all' else [x] for x in args.language))
+# ALTS interop clients are only available for certain languages.
+if args.transport_security == 'alts':
+ alts_languages = set(_LANGUAGES[l] for l in _LANGUAGES_FOR_ALTS_TEST_CASES)
+ languages = languages.intersection(alts_languages)
+
+languages_http2_clients_for_http2_server_interop = set()
+if args.http2_server_interop:
+ languages_http2_clients_for_http2_server_interop = set(
+ _LANGUAGES[l]
+ for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
+ if 'all' in args.language or l in args.language)
+
+http2Interop = Http2Client() if args.http2_interop else None
+http2InteropServer = Http2Server() if args.http2_server_interop else None
+
+docker_images = {}
+if args.use_docker:
+ # languages for which to build docker images
+ languages_to_build = set(_LANGUAGES[k]
+ for k in set([str(l) for l in languages] +
+ [s for s in servers]))
+ languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
+
+ if args.http2_interop:
+ languages_to_build.add(http2Interop)
+
+ if args.http2_server_interop:
+ languages_to_build.add(http2InteropServer)
+
+ build_jobs = []
+ for l in languages_to_build:
+ if str(l) == 'objc':
+ # we don't need to build a docker image for objc
+ continue
+ job = build_interop_image_jobspec(l)
+ docker_images[str(l)] = job.tag
+ build_jobs.append(job)
+
+ if build_jobs:
+ jobset.message('START',
+ 'Building interop docker images.',
+ do_newline=True)
+ if args.verbose:
+ print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+
+ num_failures, build_resultset = jobset.run(build_jobs,
+ newline_on_success=True,
+ maxjobs=args.jobs)
+
+ report_utils.render_junit_xml_report(build_resultset,
+ _DOCKER_BUILD_XML_REPORT)
+
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'All docker images built successfully.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED',
+ 'Failed to build interop docker images.',
+ do_newline=True)
+ for image in six.itervalues(docker_images):
+ dockerjob.remove_image(image, skip_nonexistent=True)
+ sys.exit(1)
+
+server_manual_cmd_log = [] if args.manual_run else None
+client_manual_cmd_log = [] if args.manual_run else None
+
+# Start interop servers.
+server_jobs = {}
+server_addresses = {}
+try:
+ for s in servers:
+ lang = str(s)
+ spec = server_jobspec(_LANGUAGES[lang],
+ docker_images.get(lang),
+ args.transport_security,
+ manual_cmd_log=server_manual_cmd_log)
+ if not args.manual_run:
+ job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = job
+ server_addresses[lang] = ('localhost',
+ job.mapped_port(_DEFAULT_SERVER_PORT))
+ else:
+ # don't run the server, set server port to a placeholder value
+ server_addresses[lang] = ('localhost', '${SERVER_PORT}')
+
+ http2_server_job = None
+ if args.http2_server_interop:
+        # launch an HTTP2 server emulator that creates edge cases
+ lang = str(http2InteropServer)
+ spec = server_jobspec(http2InteropServer,
+ docker_images.get(lang),
+ manual_cmd_log=server_manual_cmd_log)
+ if not args.manual_run:
+ http2_server_job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = http2_server_job
+ else:
+ # don't run the server, set server port to a placeholder value
+ server_addresses[lang] = ('localhost', '${SERVER_PORT}')
+
+ jobs = []
+ if args.cloud_to_prod:
+ if args.transport_security not in ['tls']:
+ print('TLS is always enabled for cloud_to_prod scenarios.')
+ for server_host_nickname in args.prod_servers:
+ for language in languages:
+ for test_case in _TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION + _SKIP_SPECIAL_STATUS_MESSAGE:
+ for transport_security in args.custom_credentials_type:
+ # google_default_credentials not yet supported by all languages
+ if transport_security == 'google_default_credentials' and str(
+ language) not in [
+ 'c++', 'go', 'java', 'javaokhttp'
+ ]:
+ continue
+ # compute_engine_channel_creds not yet supported by all languages
+ if transport_security == 'compute_engine_channel_creds' and str(
+ language) not in [
+ 'go', 'java', 'javaokhttp'
+ ]:
+ continue
+ test_job = cloud_to_prod_jobspec(
+ language,
+ test_case,
+ server_host_nickname,
+ prod_servers[server_host_nickname],
+ google_default_creds_use_key_file=args.
+ google_default_creds_use_key_file,
+ docker_image=docker_images.get(
+ str(language)),
+ manual_cmd_log=client_manual_cmd_log,
+ service_account_key_file=args.
+ service_account_key_file,
+ default_service_account=args.
+ default_service_account,
+ transport_security=transport_security)
+ jobs.append(test_job)
+ if args.http2_interop:
+ for test_case in _HTTP2_TEST_CASES:
+ test_job = cloud_to_prod_jobspec(
+ http2Interop,
+ test_case,
+ server_host_nickname,
+ prod_servers[server_host_nickname],
+ google_default_creds_use_key_file=args.
+ google_default_creds_use_key_file,
+ docker_image=docker_images.get(str(http2Interop)),
+ manual_cmd_log=client_manual_cmd_log,
+ service_account_key_file=args.service_account_key_file,
+ default_service_account=args.default_service_account,
+ transport_security=args.transport_security)
+ jobs.append(test_job)
+
+ if args.cloud_to_prod_auth:
+ if args.transport_security not in ['tls']:
+ print('TLS is always enabled for cloud_to_prod scenarios.')
+ for server_host_nickname in args.prod_servers:
+ for language in languages:
+ for test_case in _AUTH_TEST_CASES:
+ if (not args.skip_compute_engine_creds or
+ not compute_engine_creds_required(
+ language, test_case)):
+ if not test_case in language.unimplemented_test_cases():
+ if test_case == _GOOGLE_DEFAULT_CREDS_TEST_CASE:
+ transport_security = 'google_default_credentials'
+ elif test_case == _COMPUTE_ENGINE_CHANNEL_CREDS_TEST_CASE:
+ transport_security = 'compute_engine_channel_creds'
+ else:
+ transport_security = 'tls'
+ if transport_security not in args.custom_credentials_type:
+ continue
+ test_job = cloud_to_prod_jobspec(
+ language,
+ test_case,
+ server_host_nickname,
+ prod_servers[server_host_nickname],
+ google_default_creds_use_key_file=args.
+ google_default_creds_use_key_file,
+ docker_image=docker_images.get(str(language)),
+ auth=True,
+ manual_cmd_log=client_manual_cmd_log,
+ service_account_key_file=args.
+ service_account_key_file,
+ default_service_account=args.
+ default_service_account,
+ transport_security=transport_security)
+ jobs.append(test_job)
+ for server in args.override_server:
+ server_name = server[0]
+ (server_host, server_port) = server[1].split(':')
+ server_addresses[server_name] = (server_host, server_port)
+
+ for server_name, server_address in server_addresses.items():
+ (server_host, server_port) = server_address
+ server_language = _LANGUAGES.get(server_name, None)
+ skip_server = [] # test cases unimplemented by server
+ if server_language:
+ skip_server = server_language.unimplemented_test_cases_server()
+ for language in languages:
+ for test_case in _TEST_CASES:
+ if not test_case in language.unimplemented_test_cases():
+ if not test_case in skip_server:
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ transport_security=args.transport_security,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.http2_interop:
+ for test_case in _HTTP2_TEST_CASES:
+ if server_name == "go":
+ # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
+ continue
+ test_job = cloud_to_cloud_jobspec(
+ http2Interop,
+ test_case,
+ server_name,
+ server_host,
+ server_port,
+ docker_image=docker_images.get(str(http2Interop)),
+ transport_security=args.transport_security,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if args.http2_server_interop:
+ if not args.manual_run:
+ http2_server_job.wait_for_healthy(timeout_seconds=600)
+ for language in languages_http2_clients_for_http2_server_interop:
+ for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(
+ _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
+ offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+ server_port = _DEFAULT_SERVER_PORT + offset
+ if not args.manual_run:
+ server_port = http2_server_job.mapped_port(server_port)
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ str(http2InteropServer),
+ 'localhost',
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+ for language in languages:
+        # _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
+        # _HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
+        # than specialized http2 clients, reusing existing test implementations.
+        # For example, in the "data_frame_padding" test, use a language's gRPC
+        # interop client and make it think that it's running the "large_unary"
+        # test case. This avoids implementing a new test case in each language.
+ for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ if test_case not in language.unimplemented_test_cases():
+ offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+ server_port = _DEFAULT_SERVER_PORT + offset
+ if not args.manual_run:
+ server_port = http2_server_job.mapped_port(server_port)
+ if args.transport_security != 'insecure':
+ print(
+                            ('Creating grpc client for an http2 server test case '
+                             'with an insecure connection, even though '
+                             'args.transport_security is not insecure. The http2 '
+                             'test server only supports insecure connections.'))
+ test_job = cloud_to_cloud_jobspec(
+ language,
+ test_case,
+ str(http2InteropServer),
+ 'localhost',
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ transport_security='insecure',
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
+
+ if not jobs:
+ print('No jobs to run.')
+ for image in six.itervalues(docker_images):
+ dockerjob.remove_image(image, skip_nonexistent=True)
+ sys.exit(1)
+
+ if args.manual_run:
+        print('All tests will be skipped because --manual_run is active.')
+
+ if args.verbose:
+ print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+
+ num_failures, resultset = jobset.run(jobs,
+ newline_on_success=True,
+ maxjobs=args.jobs,
+ skip_jobs=args.manual_run)
+ if args.bq_result_table and resultset:
+ upload_interop_results_to_bq(resultset, args.bq_result_table)
+ if num_failures:
+ jobset.message('FAILED', 'Some tests failed', do_newline=True)
+ else:
+ jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+
+ write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
+ write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
+
+ report_utils.render_junit_xml_report(resultset, _TESTS_XML_REPORT)
+
+ for name, job in resultset.items():
+ if "http2" in name:
+ job[0].http2results = aggregate_http2_results(job[0].message)
+
+ http2_server_test_cases = (_HTTP2_SERVER_TEST_CASES
+ if args.http2_server_interop else [])
+
+ if num_failures:
+ sys.exit(1)
+ else:
+ sys.exit(0)
+finally:
+ # Check if servers are still running.
+ for server, job in server_jobs.items():
+ if not job.is_running():
+ print('Server "%s" has exited prematurely.' % server)
+
+ dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
+
+ for image in six.itervalues(docker_images):
+ if not args.manual_run:
+ print('Removing docker image %s' % image)
+ dockerjob.remove_image(image)
+ else:
+ print('Preserving docker image: %s' % image)
diff --git a/grpc/tools/run_tests/run_microbenchmark.py b/grpc/tools/run_tests/run_microbenchmark.py
new file mode 100755
index 00000000..4b9cd4bc
--- /dev/null
+++ b/grpc/tools/run_tests/run_microbenchmark.py
@@ -0,0 +1,261 @@
+#!/usr/bin/env python
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cgi
+import multiprocessing
+import os
+import subprocess
+import sys
+import argparse
+
+import python_utils.jobset as jobset
+import python_utils.start_port_server as start_port_server
+
+sys.path.append(
+ os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling',
+ 'microbenchmarks', 'bm_diff'))
+import bm_constants
+
+flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+if not os.path.exists('reports'):
+ os.makedirs('reports')
+
+start_port_server.start_port_server()
+
+
+def fnize(s):
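+    """Turns a benchmark name into a filesystem-friendly token, collapsing
+    runs of separator characters into single underscores.
+
+    e.g. (illustrative) fnize('BM_Chan<int>/0') == 'BM_Chan_int_0'
+    """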
+ out = ''
+ for c in s:
+ if c in '<>, /':
+            if out and out[-1] == '_':
+                continue
+ out += '_'
+ else:
+ out += c
+ return out
+
+
+# index html
+index_html = """
+<html>
+<head>
+<title>Microbenchmark Results</title>
+</head>
+<body>
+"""
+
+
+def heading(name):
+ global index_html
+ index_html += "<h1>%s</h1>\n" % name
+
+
+def link(txt, tgt):
+ global index_html
+ index_html += "<p><a href=\"%s\">%s</a></p>\n" % (cgi.escape(
+ tgt, quote=True), cgi.escape(txt))
+
+
+def text(txt):
+ global index_html
+ index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+
+
+def collect_latency(bm_name, args):
+ """generate latency profiles"""
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+
+ heading('Latency Profiles: %s' % bm_name)
+ subprocess.check_call([
+ 'make', bm_name, 'CONFIG=basicprof', '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ for line in subprocess.check_output(
+ ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+ link(line, '%s.txt' % fnize(line))
+ benchmarks.append(
+ jobset.JobSpec([
+ 'bins/basicprof/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line, '--benchmark_min_time=0.05'
+ ],
+ environ={
+ 'GRPC_LATENCY_TRACE': '%s.trace' % fnize(line)
+ },
+ shortname='profile-%s' % fnize(line)))
+ profile_analysis.append(
+ jobset.JobSpec([
+ sys.executable,
+ 'tools/profiling/latency_profile/profile_analyzer.py',
+ '--source',
+ '%s.trace' % fnize(line), '--fmt', 'simple', '--out',
+ 'reports/%s.txt' % fnize(line)
+ ],
+ timeout_seconds=20 * 60,
+ shortname='analyze-%s' % fnize(line)))
+ cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
+        # periodically flush out the list of jobs: profile_analysis jobs can
+        # consume upwards of five gigabytes of RAM in some cases, so analysing
+ # hundreds of them at once is impractical -- but we want at least some
+ # concurrency or the work takes too long
+ if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
+ # run up to half the cpu count: each benchmark can use up to two cores
+ # (one for the microbenchmark, one for the data flush)
+ jobset.run(benchmarks,
+ maxjobs=max(1,
+                                   multiprocessing.cpu_count() // 2))
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ # run the remaining benchmarks that weren't flushed
+ if len(benchmarks):
+        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2))
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
+
+def collect_perf(bm_name, args):
+ """generate flamegraphs"""
+ heading('Flamegraphs: %s' % bm_name)
+ subprocess.check_call([
+ 'make', bm_name, 'CONFIG=mutrace', '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ for line in subprocess.check_output(
+ ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+ link(line, '%s.svg' % fnize(line))
+ benchmarks.append(
+ jobset.JobSpec([
+ 'perf', 'record', '-o',
+ '%s-perf.data' % fnize(line), '-g', '-F', '997',
+ 'bins/mutrace/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
+ ],
+ shortname='perf-%s' % fnize(line)))
+ profile_analysis.append(
+ jobset.JobSpec(
+ [
+ 'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
+ ],
+ environ={
+ 'PERF_BASE_NAME': fnize(line),
+ 'OUTPUT_DIR': 'reports',
+ 'OUTPUT_FILENAME': fnize(line),
+ },
+ shortname='flame-%s' % fnize(line)))
+ cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
+ cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
+ # periodically flush out the list of jobs: temporary space required for this
+ # processing is large
+ if len(benchmarks) >= 20:
+            # run benchmarks one at a time: perf profiling is resource-intensive,
+            # and concurrent 'perf record' runs could skew each other's results
+ jobset.run(benchmarks, maxjobs=1)
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+ benchmarks = []
+ profile_analysis = []
+ cleanup = []
+ # run the remaining benchmarks that weren't flushed
+ if len(benchmarks):
+ jobset.run(benchmarks, maxjobs=1)
+ jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+ jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
+
+def run_summary(bm_name, cfg, base_json_name):
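+    # Builds the benchmark under the given config, runs it with JSON output to
+    # '<base_json_name>.<cfg>.json', and returns the captured stdout. Note that
+    # 'args' is the module-level argparse namespace parsed at the bottom of
+    # this file.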
+ subprocess.check_call([
+ 'make', bm_name,
+ 'CONFIG=%s' % cfg, '-j',
+ '%d' % multiprocessing.cpu_count()
+ ])
+ cmd = [
+ 'bins/%s/%s' % (cfg, bm_name),
+ '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
+ '--benchmark_out_format=json'
+ ]
+ if args.summary_time is not None:
+ cmd += ['--benchmark_min_time=%d' % args.summary_time]
+ return subprocess.check_output(cmd)
+
+
+def collect_summary(bm_name, args):
+ heading('Summary: %s [no counters]' % bm_name)
+ text(run_summary(bm_name, 'opt', bm_name))
+ heading('Summary: %s [with counters]' % bm_name)
+ text(run_summary(bm_name, 'counters', bm_name))
+ if args.bigquery_upload:
+ with open('%s.csv' % bm_name, 'w') as f:
+ f.write(
+ subprocess.check_output([
+ 'tools/profiling/microbenchmarks/bm2bq.py',
+ '%s.counters.json' % bm_name,
+ '%s.opt.json' % bm_name
+ ]))
+ subprocess.check_call([
+ 'bq', 'load', 'microbenchmarks.microbenchmarks',
+ '%s.csv' % bm_name
+ ])
+
+
+collectors = {
+ 'latency': collect_latency,
+ 'perf': collect_perf,
+ 'summary': collect_summary,
+}
+
+argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
+argp.add_argument('-c',
+ '--collect',
+ choices=sorted(collectors.keys()),
+ nargs='*',
+ default=sorted(collectors.keys()),
+ help='Which collectors should be run against each benchmark')
+argp.add_argument('-b',
+ '--benchmarks',
+ choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ nargs='+',
+ type=str,
+ help='Which microbenchmarks should be run')
+argp.add_argument('--bigquery_upload',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Upload results from summary collection to bigquery')
+argp.add_argument(
+ '--summary_time',
+ default=None,
+ type=int,
+ help='Minimum time to run benchmarks for the summary collection')
+args = argp.parse_args()
+
+try:
+ for collect in args.collect:
+ for bm_name in args.benchmarks:
+ collectors[collect](bm_name, args)
+finally:
+ if not os.path.exists('reports'):
+ os.makedirs('reports')
+ index_html += "</body>\n</html>\n"
+ with open('reports/index.html', 'w') as f:
+ f.write(index_html)
diff --git a/grpc/tools/run_tests/run_performance_tests.py b/grpc/tools/run_tests/run_performance_tests.py
new file mode 100755
index 00000000..0845a712
--- /dev/null
+++ b/grpc/tools/run_tests/run_performance_tests.py
@@ -0,0 +1,719 @@
+#!/usr/bin/env python
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Run performance tests locally or remotely."""
+
+from __future__ import print_function
+
+import argparse
+import collections
+import itertools
+import json
+import multiprocessing
+import os
+import pipes
+import re
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import uuid
+import six
+
+import performance.scenario_config as scenario_config
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+
+_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+os.chdir(_ROOT)
+
+_REMOTE_HOST_USERNAME = 'jenkins'
+
+_SCENARIO_TIMEOUT = 3 * 60
+_WORKER_TIMEOUT = 3 * 60
+_NETPERF_TIMEOUT = 60
+_QUIT_WORKER_TIMEOUT = 2 * 60
+
+
+class QpsWorkerJob:
+ """Encapsulates a qps worker server job."""
+
+ def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
+ self._spec = spec
+ self.language = language
+ self.host_and_port = host_and_port
+ self._job = None
+ self.perf_file_base_name = perf_file_base_name
+
+ def start(self):
+ self._job = jobset.Job(self._spec,
+ newline_on_success=True,
+ travis=True,
+ add_env={})
+
+ def is_running(self):
+ """Polls a job and returns True if given job is still running."""
+ return self._job and self._job.state() == jobset._RUNNING
+
+ def kill(self):
+ if self._job:
+ self._job.kill()
+ self._job = None
+
+
+def create_qpsworker_job(language,
+ shortname=None,
+ port=10000,
+ remote_host=None,
+ perf_cmd=None):
+ cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
+
+ if remote_host:
+ host_and_port = '%s:%s' % (remote_host, port)
+ else:
+ host_and_port = 'localhost:%s' % port
+
+ perf_file_base_name = None
+ if perf_cmd:
+ perf_file_base_name = '%s-%s' % (host_and_port, shortname)
+        # specify the -o output file so the perf data gets collected when the worker is stopped
+ cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name
+ ] + cmdline
+
+ worker_timeout = _WORKER_TIMEOUT
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ ssh_cmd = ['ssh']
+ cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
+ ssh_cmd.extend([
+ str(user_at_host),
+ 'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)
+ ])
+ cmdline = ssh_cmd
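+        # e.g. (illustrative) the final cmdline is now roughly:
+        #   ['ssh', 'jenkins@host',
+        #    'cd ~/performance_workspace/grpc/ && timeout 210 <worker cmdline>']
+        # where 210 is worker_timeout + 30.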
+
+ jobspec = jobset.JobSpec(
+ cmdline=cmdline,
+ shortname=shortname,
+ timeout_seconds=
+ worker_timeout, # workers get restarted after each scenario
+ verbose_success=True)
+ return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
+
+
+def create_scenario_jobspec(scenario_json,
+ workers,
+ remote_host=None,
+ bq_result_table=None,
+ server_cpu_load=0):
+ """Runs one scenario using QPS driver."""
+    # setting the QPS_WORKERS env variable here makes sure it works over SSH too.
+ cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
+ if bq_result_table:
+ cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+ cmd += 'tools/run_tests/performance/run_qps_driver.sh '
+ cmd += '--scenarios_json=%s ' % pipes.quote(
+ json.dumps({'scenarios': [scenario_json]}))
+ cmd += '--scenario_result_file=scenario_result.json '
+ if server_cpu_load != 0:
+ cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(cmdline=[cmd],
+ shortname='%s' % scenario_json['name'],
+ timeout_seconds=_SCENARIO_TIMEOUT,
+ shell=True,
+ verbose_success=True)
+
+
+def create_quit_jobspec(workers, remote_host=None):
+ """Runs quit using QPS driver."""
+    # setting the QPS_WORKERS env variable here makes sure it works over SSH too.
+ cmd = 'QPS_WORKERS="%s" cmake/build/qps_json_driver --quit' % ','.join(
+ w.host_and_port for w in workers)
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(cmdline=[cmd],
+ shortname='shutdown_workers',
+ timeout_seconds=_QUIT_WORKER_TIMEOUT,
+ shell=True,
+ verbose_success=True)
+
+
+def create_netperf_jobspec(server_host='localhost',
+ client_host=None,
+ bq_result_table=None):
+ """Runs netperf benchmark."""
+ cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
+ if bq_result_table:
+ cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+ if client_host:
+ # If netperf is running remotely, the env variables populated by Jenkins
+ # won't be available on the client, but we need them for uploading results
+ # to BigQuery.
+ jenkins_job_name = os.getenv('JOB_NAME')
+ if jenkins_job_name:
+ cmd += 'JOB_NAME="%s" ' % jenkins_job_name
+ jenkins_build_number = os.getenv('BUILD_NUMBER')
+ if jenkins_build_number:
+ cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
+
+ cmd += 'tools/run_tests/performance/run_netperf.sh'
+ if client_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
+ cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+ user_at_host, pipes.quote(cmd))
+
+ return jobset.JobSpec(cmdline=[cmd],
+ shortname='netperf',
+ timeout_seconds=_NETPERF_TIMEOUT,
+ shell=True,
+ verbose_success=True)
+
+
+def archive_repo(languages):
+ """Archives local version of repo including submodules."""
+ cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
+ if 'java' in languages:
+ cmdline.append('../grpc-java')
+ if 'go' in languages:
+ cmdline.append('../grpc-go')
+ if 'node' in languages or 'node_purejs' in languages:
+ cmdline.append('../grpc-node')
+
+ archive_job = jobset.JobSpec(cmdline=cmdline,
+ shortname='archive_repo',
+ timeout_seconds=3 * 60)
+
+ jobset.message('START', 'Archiving local repository.', do_newline=True)
+ num_failures, _ = jobset.run([archive_job],
+ newline_on_success=True,
+ maxjobs=1)
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'Archive with local repository created successfully.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED',
+ 'Failed to archive local repository.',
+ do_newline=True)
+ sys.exit(1)
+
+
+def prepare_remote_hosts(hosts, prepare_local=False):
+ """Prepares remote hosts (and maybe prepare localhost as well)."""
+ prepare_timeout = 10 * 60
+ prepare_jobs = []
+ for host in hosts:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+ prepare_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
+ shortname='remote_host_prepare.%s' % host,
+ environ={'USER_AT_HOST': user_at_host},
+ timeout_seconds=prepare_timeout))
+ if prepare_local:
+ # Prepare localhost as well
+ prepare_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/kill_workers.sh'],
+ shortname='local_prepare',
+ timeout_seconds=prepare_timeout))
+ jobset.message('START', 'Preparing hosts.', do_newline=True)
+ num_failures, _ = jobset.run(prepare_jobs,
+ newline_on_success=True,
+ maxjobs=10)
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'Prepare step completed successfully.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED',
+ 'Failed to prepare remote hosts.',
+ do_newline=True)
+ sys.exit(1)
+
+
+def build_on_remote_hosts(hosts,
+ languages=scenario_config.LANGUAGES.keys(),
+ build_local=False):
+ """Builds performance worker on remote hosts (and maybe also locally)."""
+ build_timeout = 45 * 60
+ # Kokoro VMs (which are local only) do not have caching, so they need more time to build
+ local_build_timeout = 60 * 60
+ build_jobs = []
+ for host in hosts:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
+ languages,
+ shortname='remote_host_build.%s' % host,
+ environ={
+ 'USER_AT_HOST': user_at_host,
+ 'CONFIG': 'opt'
+ },
+ timeout_seconds=build_timeout))
+ if build_local:
+ # start port server locally
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['python', 'tools/run_tests/start_port_server.py'],
+ shortname='local_start_port_server',
+ timeout_seconds=2 * 60))
+ # Build locally as well
+ build_jobs.append(
+ jobset.JobSpec(
+ cmdline=['tools/run_tests/performance/build_performance.sh'] +
+ languages,
+ shortname='local_build',
+ environ={'CONFIG': 'opt'},
+ timeout_seconds=local_build_timeout))
+ jobset.message('START', 'Building.', do_newline=True)
+ num_failures, _ = jobset.run(build_jobs,
+ newline_on_success=True,
+ maxjobs=10)
+ if num_failures == 0:
+ jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
+ else:
+ jobset.message('FAILED', 'Build failed.', do_newline=True)
+ sys.exit(1)
+
+
+def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
+ """Creates QPS workers (but does not start them)."""
+ if not worker_hosts:
+ # run two workers locally (for each language)
+ workers = [(None, 10000), (None, 10010)]
+ elif len(worker_hosts) == 1:
+ # run two workers on the remote host (for each language)
+ workers = [(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
+ else:
+        # run one worker on each remote host (for each language)
+ workers = [(worker_host, 10000) for worker_host in worker_hosts]
+
+ return [
+ create_qpsworker_job(language,
+ shortname='qps_worker_%s_%s' %
+ (language, worker_idx),
+ port=worker[1] + language.worker_port_offset(),
+ remote_host=worker[0],
+ perf_cmd=perf_cmd)
+ for language in languages
+ for worker_idx, worker in enumerate(workers)
+ ]
+
+
+def perf_report_processor_job(worker_host, perf_base_name, output_filename,
+ flame_graph_reports):
+ print('Creating perf report collection job for %s' % worker_host)
+ cmd = ''
+ if worker_host != 'localhost':
+ user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
+ cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s tools/run_tests/performance/process_remote_perf_flamegraphs.sh" % (
+ user_at_host, output_filename, flame_graph_reports, perf_base_name)
+ else:
+ cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s tools/run_tests/performance/process_local_perf_flamegraphs.sh" % (
+ output_filename, flame_graph_reports, perf_base_name)
+
+ return jobset.JobSpec(cmdline=cmd,
+ timeout_seconds=3 * 60,
+ shell=True,
+ verbose_success=True,
+ shortname='process perf report')
+
+
+Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
+
+
+def create_scenarios(languages,
+ workers_by_lang,
+ remote_host=None,
+ regex='.*',
+ category='all',
+ bq_result_table=None,
+ netperf=False,
+ netperf_hosts=[],
+ server_cpu_load=0):
+ """Create jobspecs for scenarios to run."""
+ all_workers = [
+ worker for workers in workers_by_lang.values() for worker in workers
+ ]
+ scenarios = []
+ _NO_WORKERS = []
+
+ if netperf:
+ if not netperf_hosts:
+ netperf_server = 'localhost'
+ netperf_client = None
+ elif len(netperf_hosts) == 1:
+ netperf_server = netperf_hosts[0]
+ netperf_client = netperf_hosts[0]
+ else:
+ netperf_server = netperf_hosts[0]
+ netperf_client = netperf_hosts[1]
+ scenarios.append(
+ Scenario(
+ create_netperf_jobspec(server_host=netperf_server,
+ client_host=netperf_client,
+ bq_result_table=bq_result_table),
+ _NO_WORKERS, 'netperf'))
+
+ for language in languages:
+ for scenario_json in language.scenarios():
+ if re.search(regex, scenario_json['name']):
+ categories = scenario_json.get('CATEGORIES',
+ ['scalable', 'smoketest'])
+ if category in categories or category == 'all':
+ workers = workers_by_lang[str(language)][:]
+ # 'SERVER_LANGUAGE' is an indicator for this script to pick
+                    # a server in a different language.
+ custom_server_lang = scenario_json.get(
+ 'SERVER_LANGUAGE', None)
+ custom_client_lang = scenario_json.get(
+ 'CLIENT_LANGUAGE', None)
+ scenario_json = scenario_config.remove_nonproto_fields(
+ scenario_json)
+ if custom_server_lang and custom_client_lang:
+ raise Exception(
+                            'Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE '
+                            'in the same scenario')
+ if custom_server_lang:
+ if not workers_by_lang.get(custom_server_lang, []):
+ print('Warning: Skipping scenario %s as' %
+ scenario_json['name'])
+ print(
+ 'SERVER_LANGUAGE is set to %s yet the language has '
+ 'not been selected with -l' %
+ custom_server_lang)
+ continue
+ for idx in range(0, scenario_json['num_servers']):
+                            # replace the first num_servers workers with workers of a different language
+ workers[idx] = workers_by_lang[custom_server_lang][
+ idx]
+ if custom_client_lang:
+ if not workers_by_lang.get(custom_client_lang, []):
+ print('Warning: Skipping scenario %s as' %
+ scenario_json['name'])
+ print(
+ 'CLIENT_LANGUAGE is set to %s yet the language has '
+ 'not been selected with -l' %
+ custom_client_lang)
+ continue
+ for idx in range(scenario_json['num_servers'],
+ len(workers)):
+                            # replace all client workers with workers of a
+                            # different language; leave the first num_servers
+                            # workers alone as they are server workers.
+ workers[idx] = workers_by_lang[custom_client_lang][
+ idx]
+ scenario = Scenario(
+ create_scenario_jobspec(
+ scenario_json, [w.host_and_port for w in workers],
+ remote_host=remote_host,
+ bq_result_table=bq_result_table,
+ server_cpu_load=server_cpu_load), workers,
+ scenario_json['name'])
+ scenarios.append(scenario)
+
+ return scenarios
+
+
+def finish_qps_workers(jobs, qpsworker_jobs):
+ """Waits for given jobs to finish and eventually kills them."""
+ retries = 0
+ num_killed = 0
+ while any(job.is_running() for job in jobs):
+ for job in qpsworker_jobs:
+ if job.is_running():
+ print('QPS worker "%s" is still running.' % job.host_and_port)
+ if retries > 10:
+ print('Killing all QPS workers.')
+ for job in jobs:
+ job.kill()
+ num_killed += 1
+ retries += 1
+ time.sleep(3)
+ print('All QPS workers finished.')
+ return num_killed
+
+
+profile_output_files = []
+
+
+# Collect perf text reports and flamegraphs if perf_cmd was used.
+# Note that the base names of perf text reports are used when creating and
+# processing perf data. The scenario name makes the output name unique in the
+# final perf reports directory.
+# Also, the perf profiles need to be fetched and processed after each scenario
+# in order to avoid clobbering the output files.
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name,
+ flame_graph_reports):
+ perf_report_jobs = []
+ global profile_output_files
+ for host_and_port in hosts_and_base_names:
+ perf_base_name = hosts_and_base_names[host_and_port]
+ output_filename = '%s-%s' % (scenario_name, perf_base_name)
+ # from the base filename, create .svg output filename
+ host = host_and_port.split(':')[0]
+ profile_output_files.append('%s.svg' % output_filename)
+ perf_report_jobs.append(
+ perf_report_processor_job(host, perf_base_name, output_filename,
+ flame_graph_reports))
+
+ jobset.message('START',
+ 'Collecting perf reports from qps workers',
+ do_newline=True)
+ failures, _ = jobset.run(perf_report_jobs,
+ newline_on_success=True,
+ maxjobs=1)
+ jobset.message('SUCCESS',
+ 'Collecting perf reports from qps workers',
+ do_newline=True)
+ return failures
+
+
+def main():
+ argp = argparse.ArgumentParser(description='Run performance tests.')
+ argp.add_argument('-l',
+ '--language',
+ choices=['all'] +
+ sorted(scenario_config.LANGUAGES.keys()),
+ nargs='+',
+ required=True,
+ help='Languages to benchmark.')
+ argp.add_argument(
+ '--remote_driver_host',
+ default=None,
+ help=
+ 'Run QPS driver on given host. By default, QPS driver is run locally.')
+ argp.add_argument('--remote_worker_host',
+ nargs='+',
+ default=[],
+ help='Worker hosts where to start QPS workers.')
+ argp.add_argument(
+ '--dry_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Just list scenarios to be run, but don\'t run them.')
+ argp.add_argument('-r',
+ '--regex',
+ default='.*',
+ type=str,
+ help='Regex to select scenarios to run.')
+ argp.add_argument('--bq_result_table',
+ default=None,
+ type=str,
+ help='Bigquery "dataset.table" to upload results to.')
+ argp.add_argument('--category',
+ choices=['smoketest', 'all', 'scalable', 'sweep'],
+ default='all',
+ help='Select a category of tests to run.')
+ argp.add_argument('--netperf',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Run netperf benchmark as one of the scenarios.')
+ argp.add_argument(
+ '--server_cpu_load',
+ default=0,
+ type=int,
+ help='Select a targeted server cpu load to run. 0 means ignore this flag'
+ )
+ argp.add_argument('-x',
+ '--xml_report',
+ default='report.xml',
+ type=str,
+ help='Name of XML report file to generate.')
+ argp.add_argument(
+ '--perf_args',
+ help=('Example usage: "--perf_args=record -F 99 -g". '
+ 'Wrap QPS workers in a perf command '
+ 'with the arguments to perf specified here. '
+ '".svg" flame graph profiles will be '
+              'created for each QPS worker on each scenario. '
+              'Files will be output to the '
+              '"<repo_root>/<args.flame_graph_reports>" directory. '
+              'Output files from running the worker '
+              'under perf are saved in the repo root where it is run. '
+              'Note that the perf "-g" flag is necessary for '
+              'flame graph generation to work (assuming the binary '
+              'being profiled uses frame pointers; check out the '
+              '"--call-graph dwarf" option using libunwind otherwise). '
+              'Also note that the entire "--perf_args=<arg(s)>" must '
+              'be wrapped in quotes as in the example usage. '
+              'If "--perf_args" is unspecified, "perf" will '
+              'not be used at all. '
+ 'See http://www.brendangregg.com/perf.html '
+ 'for more general perf examples.'))
+ argp.add_argument(
+ '--skip_generate_flamegraphs',
+ default=False,
+ action='store_const',
+ const=True,
+ help=('Turn flame graph generation off. '
+ 'May be useful if "perf_args" arguments do not make sense for '
+ 'generating flamegraphs (e.g., "--perf_args=stat ...")'))
+ argp.add_argument(
+ '-f',
+ '--flame_graph_reports',
+ default='perf_reports',
+ type=str,
+ help=
+ 'Name of directory to output flame graph profiles to, if any are created.'
+ )
+ argp.add_argument(
+ '-u',
+ '--remote_host_username',
+ default='',
+ type=str,
+ help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
+
+ args = argp.parse_args()
+
+ global _REMOTE_HOST_USERNAME
+ if args.remote_host_username:
+ _REMOTE_HOST_USERNAME = args.remote_host_username
+
+ languages = set(
+ scenario_config.LANGUAGES[l] for l in itertools.chain.from_iterable(
+ six.iterkeys(scenario_config.LANGUAGES) if x == 'all' else [x]
+ for x in args.language))
+
+ # Put together set of remote hosts where to run and build
+ remote_hosts = set()
+ if args.remote_worker_host:
+ for host in args.remote_worker_host:
+ remote_hosts.add(host)
+ if args.remote_driver_host:
+ remote_hosts.add(args.remote_driver_host)
+
+ if not args.dry_run:
+ if remote_hosts:
+ archive_repo(languages=[str(l) for l in languages])
+ prepare_remote_hosts(remote_hosts, prepare_local=True)
+ else:
+ prepare_remote_hosts([], prepare_local=True)
+
+ build_local = False
+ if not args.remote_driver_host:
+ build_local = True
+ if not args.dry_run:
+ build_on_remote_hosts(remote_hosts,
+ languages=[str(l) for l in languages],
+ build_local=build_local)
+
+ perf_cmd = None
+ if args.perf_args:
+ print('Running workers under perf profiler')
+ # Expect /usr/bin/perf to be installed here, as is usual
+ perf_cmd = ['/usr/bin/perf']
+ perf_cmd.extend(re.split('\s+', args.perf_args))
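+        # e.g., --perf_args="record -F 99 -g" (values illustrative) yields
+        # perf_cmd == ['/usr/bin/perf', 'record', '-F', '99', '-g']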
+
+ qpsworker_jobs = create_qpsworkers(languages,
+ args.remote_worker_host,
+ perf_cmd=perf_cmd)
+
+ # get list of worker addresses for each language.
+ workers_by_lang = dict([(str(language), []) for language in languages])
+ for job in qpsworker_jobs:
+ workers_by_lang[str(job.language)].append(job)
+
+ scenarios = create_scenarios(languages,
+ workers_by_lang=workers_by_lang,
+ remote_host=args.remote_driver_host,
+ regex=args.regex,
+ category=args.category,
+ bq_result_table=args.bq_result_table,
+ netperf=args.netperf,
+ netperf_hosts=args.remote_worker_host,
+ server_cpu_load=args.server_cpu_load)
+
+ if not scenarios:
+ raise Exception('No scenarios to run')
+
+ total_scenario_failures = 0
+ qps_workers_killed = 0
+ merged_resultset = {}
+ perf_report_failures = 0
+
+ for scenario in scenarios:
+ if args.dry_run:
+ print(scenario.name)
+ else:
+ scenario_failures = 0
+ try:
+ for worker in scenario.workers:
+ worker.start()
+ jobs = [scenario.jobspec]
+ if scenario.workers:
+ # TODO(jtattermusch): ideally the "quit" job won't show up
+ # in the report
+ jobs.append(
+ create_quit_jobspec(
+ scenario.workers,
+ remote_host=args.remote_driver_host))
+ scenario_failures, resultset = jobset.run(
+ jobs, newline_on_success=True, maxjobs=1)
+ total_scenario_failures += scenario_failures
+ merged_resultset = dict(
+ itertools.chain(six.iteritems(merged_resultset),
+ six.iteritems(resultset)))
+ finally:
+ # Consider qps workers that need to be killed as failures
+ qps_workers_killed += finish_qps_workers(
+ scenario.workers, qpsworker_jobs)
+
+ if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
+ workers_and_base_names = {}
+ for worker in scenario.workers:
+ if not worker.perf_file_base_name:
+                    raise Exception(
+                        'using perf but perf report filename is unspecified')
+ workers_and_base_names[
+ worker.host_and_port] = worker.perf_file_base_name
+ perf_report_failures += run_collect_perf_profile_jobs(
+ workers_and_base_names, scenario.name,
+ args.flame_graph_reports)
+
+ # Still write the index.html even if some scenarios failed.
+ # 'profile_output_files' will only have names for scenarios that passed
+ if perf_cmd and not args.skip_generate_flamegraphs:
+        # write the index file to the output dir, with all profiles from all scenarios/workers
+ report_utils.render_perf_profiling_results(
+ '%s/index.html' % args.flame_graph_reports, profile_output_files)
+
+ report_utils.render_junit_xml_report(merged_resultset,
+ args.xml_report,
+ suite_name='benchmarks',
+ multi_target=True)
+
+ if total_scenario_failures > 0 or qps_workers_killed > 0:
+ print('%s scenarios failed and %s qps worker jobs killed' %
+ (total_scenario_failures, qps_workers_killed))
+ sys.exit(1)
+
+ if perf_report_failures > 0:
+ print('%s perf profile collection jobs failed' % perf_report_failures)
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/grpc/tools/run_tests/run_tests.py b/grpc/tools/run_tests/run_tests.py
new file mode 100755
index 00000000..b1c56762
--- /dev/null
+++ b/grpc/tools/run_tests/run_tests.py
@@ -0,0 +1,1981 @@
+#!/usr/bin/env python
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Run tests in parallel."""
+
+from __future__ import print_function
+
+import argparse
+import ast
+import collections
+import glob
+import itertools
+import json
+import logging
+import multiprocessing
+import os
+import os.path
+import pipes
+import platform
+import random
+import re
+import socket
+import subprocess
+import sys
+import tempfile
+import traceback
+import time
+from six.moves import urllib
+import uuid
+import six
+
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+import python_utils.watch_dirs as watch_dirs
+import python_utils.start_port_server as start_port_server
+try:
+ from python_utils.upload_test_results import upload_results_to_bq
+except ImportError:
+ pass # It's ok to not import because this is only necessary to upload results to BQ.
+
+gcp_utils_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+
+_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+os.chdir(_ROOT)
+
+_FORCE_ENVIRON_FOR_WRAPPERS = {
+ 'GRPC_VERBOSITY': 'DEBUG',
+}
+
+_POLLING_STRATEGIES = {
+ 'linux': ['epollex', 'epoll1', 'poll'],
+ 'mac': ['poll'],
+}
+
+BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
+
+
+def get_bqtest_data(limit=None):
+ import big_query_utils
+
+ bq = big_query_utils.create_big_query()
+ query = """
+SELECT
+ filtered_test_name,
+ SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
+ MAX(cpu_measured) + 0.01 as cpu
+ FROM (
+ SELECT
+ REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
+ result, cpu_measured
+ FROM
+ [grpc-testing:jenkins_test_results.aggregate_results]
+ WHERE
+ timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
+ AND platform = '""" + platform_string() + """'
+ AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
+GROUP BY
+ filtered_test_name"""
+ if limit:
+ query += " limit {}".format(limit)
+ query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
+ page = bq.jobs().getQueryResults(
+ pageToken=None, **query_job['jobReference']).execute(num_retries=3)
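+    # Illustrative (assumed) shape of each returned row:
+    #   {'f': [{'v': '<test name>'}, {'v': 'true'}, {'v': '0.42'}]}
+    # i.e. one cell dict per selected column, in query order.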
+ test_data = [
+ BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
+ float(row['f'][2]['v'])) for row in page['rows']
+ ]
+ return test_data
+
+
+def platform_string():
+ return jobset.platform_string()
+
+
+_DEFAULT_TIMEOUT_SECONDS = 5 * 60
+_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
+
+
+def run_shell_command(cmd, env=None, cwd=None):
+ try:
+ subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
+ except subprocess.CalledProcessError as e:
+ logging.exception(
+ "Error while running command '%s'. Exit status %d. Output:\n%s",
+ e.cmd, e.returncode, e.output)
+ raise
+
+
+def max_parallel_tests_for_current_platform():
+    # So far, too much test parallelization has only been seen to be a
+    # problem on Windows.
+ if jobset.platform_string() == 'windows':
+ return 64
+ return 1024
+
+
+# SimpleConfig: just compile with CONFIG=config, and run the binary to test
+class Config(object):
+
+ def __init__(self,
+ config,
+ environ=None,
+ timeout_multiplier=1,
+ tool_prefix=[],
+ iomgr_platform='native'):
+ if environ is None:
+ environ = {}
+ self.build_config = config
+ self.environ = environ
+ self.environ['CONFIG'] = config
+ self.tool_prefix = tool_prefix
+ self.timeout_multiplier = timeout_multiplier
+ self.iomgr_platform = iomgr_platform
+
+ def job_spec(self,
+ cmdline,
+ timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
+ shortname=None,
+ environ={},
+ cpu_cost=1.0,
+ flaky=False):
+ """Construct a jobset.JobSpec for a test under this config
+
+ Args:
+ cmdline: a list of strings specifying the command line the test
+ would like to run
+ """
+ actual_environ = self.environ.copy()
+ for k, v in environ.items():
+ actual_environ[k] = v
+ if not flaky and shortname and shortname in flaky_tests:
+ flaky = True
+ if shortname in shortname_to_cpu:
+ cpu_cost = shortname_to_cpu[shortname]
+ return jobset.JobSpec(
+ cmdline=self.tool_prefix + cmdline,
+ shortname=shortname,
+ environ=actual_environ,
+ cpu_cost=cpu_cost,
+ timeout_seconds=(self.timeout_multiplier *
+ timeout_seconds if timeout_seconds else None),
+ flake_retries=4 if flaky or args.allow_flakes else 0,
+ timeout_retries=1 if flaky or args.allow_flakes else 0)
+
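+# Minimal usage sketch (hypothetical values): given cfg = Config('opt'),
+# cfg.job_spec(['bins/opt/foo_test'], shortname='foo_test') yields a
+# jobset.JobSpec that runs the binary with CONFIG=opt in its environment.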
+
+def get_c_tests(travis, test_lang):
+ out = []
+ platforms_str = 'ci_platforms' if travis else 'platforms'
+ with open('tools/run_tests/generated/tests.json') as f:
+ js = json.load(f)
+ return [
+ tgt for tgt in js
+ if tgt['language'] == test_lang and platform_string() in
+ tgt[platforms_str] and not (travis and tgt['flaky'])
+ ]
+
+
+def _check_compiler(compiler, supported_compilers):
+ if compiler not in supported_compilers:
+ raise Exception('Compiler %s not supported (on this platform).' %
+ compiler)
+
+
+def _check_arch(arch, supported_archs):
+ if arch not in supported_archs:
+ raise Exception('Architecture %s not supported.' % arch)
+
+
+def _is_use_docker_child():
+ """Returns True if running running as a --use_docker child."""
+ return True if os.getenv('RUN_TESTS_COMMAND') else False
+
+
+_PythonConfigVars = collections.namedtuple('_ConfigVars', [
+ 'shell',
+ 'builder',
+ 'builder_prefix_arguments',
+ 'venv_relative_python',
+ 'toolchain',
+ 'runner',
+ 'test_name',
+ 'iomgr_platform',
+])
+
+
+def _python_config_generator(name, major, minor, bits, config_vars):
+ name += '_' + config_vars.iomgr_platform
+ return PythonConfig(
+ name, config_vars.shell + config_vars.builder +
+ config_vars.builder_prefix_arguments +
+ [_python_pattern_function(major=major, minor=minor, bits=bits)] +
+ [name] + config_vars.venv_relative_python + config_vars.toolchain,
+ config_vars.shell + config_vars.runner + [
+ os.path.join(name, config_vars.venv_relative_python[0]),
+ config_vars.test_name
+ ])
+
+
+def _pypy_config_generator(name, major, config_vars):
+ return PythonConfig(
+ name, config_vars.shell + config_vars.builder +
+ config_vars.builder_prefix_arguments +
+ [_pypy_pattern_function(major=major)] + [name] +
+ config_vars.venv_relative_python + config_vars.toolchain,
+ config_vars.shell + config_vars.runner +
+ [os.path.join(name, config_vars.venv_relative_python[0])])
+
+
+def _python_pattern_function(major, minor, bits):
+ # Bit-ness is handled by the test machine's environment
+ if os.name == "nt":
+ if bits == "64":
+ return '/c/Python{major}{minor}/python.exe'.format(major=major,
+ minor=minor,
+ bits=bits)
+ else:
+ return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
+ major=major, minor=minor, bits=bits)
+ else:
+ return 'python{major}.{minor}'.format(major=major, minor=minor)
+
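+# e.g., _python_pattern_function(major='3', minor='7', bits='64') returns
+# 'python3.7' on POSIX and '/c/Python37/python.exe' on Windows.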
+
+def _pypy_pattern_function(major):
+ if major == '2':
+ return 'pypy'
+ elif major == '3':
+ return 'pypy3'
+ else:
+ raise ValueError("Unknown PyPy major version")
+
+
+class CLanguage(object):
+
+ def __init__(self, make_target, test_lang):
+ self.make_target = make_target
+ self.platform = platform_string()
+ self.test_lang = test_lang
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ if self.platform == 'windows':
+ _check_compiler(
+ self.args.compiler,
+ ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
+ _check_arch(self.args.arch, ['default', 'x64', 'x86'])
+ self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
+ self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
+ self._use_cmake = True
+ self._make_options = []
+ elif self.args.compiler == 'cmake':
+ _check_arch(self.args.arch, ['default'])
+ self._use_cmake = True
+ self._docker_distro = 'jessie'
+ self._make_options = []
+ else:
+ self._use_cmake = False
+ self._docker_distro, self._make_options = self._compiler_options(
+ self.args.use_docker, self.args.compiler)
+ if args.iomgr_platform == "uv":
+ cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
+ try:
+ cflags += subprocess.check_output(
+ ['pkg-config', '--cflags', 'libuv']).strip() + ' '
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ try:
+ ldflags = subprocess.check_output(
+ ['pkg-config', '--libs', 'libuv']).strip() + ' '
+ except (subprocess.CalledProcessError, OSError):
+ ldflags = '-luv '
+ self._make_options += [
+ 'EXTRA_CPPFLAGS={}'.format(cflags),
+ 'EXTRA_LDLIBS={}'.format(ldflags)
+ ]
+
+ def test_specs(self):
+ out = []
+ binaries = get_c_tests(self.args.travis, self.test_lang)
+ for target in binaries:
+ if self._use_cmake and target.get('boringssl', False):
+ # cmake doesn't build boringssl tests
+ continue
+ auto_timeout_scaling = target.get('auto_timeout_scaling', True)
+ polling_strategies = (_POLLING_STRATEGIES.get(
+ self.platform, ['all']) if target.get('uses_polling', True) else
+ ['none'])
+ if self.args.iomgr_platform == 'uv':
+ polling_strategies = ['all']
+ for polling_strategy in polling_strategies:
+ env = {
+ 'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
+ _ROOT + '/src/core/tsi/test_creds/ca.pem',
+ 'GRPC_POLL_STRATEGY':
+ polling_strategy,
+ 'GRPC_VERBOSITY':
+ 'DEBUG'
+ }
+ resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
+ if resolver:
+ env['GRPC_DNS_RESOLVER'] = resolver
+ shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
+ if polling_strategy in target.get('excluded_poll_engines', []):
+ continue
+
+ timeout_scaling = 1
+ if auto_timeout_scaling:
+ config = self.args.config
+ if ('asan' in config or config == 'msan' or
+ config == 'tsan' or config == 'ubsan' or
+ config == 'helgrind' or config == 'memcheck'):
+ # Scale overall test timeout if running under various sanitizers.
+ # scaling value is based on historical data analysis
+ timeout_scaling *= 3
+
+ if self.config.build_config in target['exclude_configs']:
+ continue
+ if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
+ continue
+ if self.platform == 'windows':
+ binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
+ self.config.build_config], target['name'])
+ else:
+ if self._use_cmake:
+ binary = 'cmake/build/%s' % target['name']
+ else:
+ binary = 'bins/%s/%s' % (self.config.build_config,
+ target['name'])
+ cpu_cost = target['cpu_cost']
+ if cpu_cost == 'capacity':
+ cpu_cost = multiprocessing.cpu_count()
+ if os.path.isfile(binary):
+ list_test_command = None
+ filter_test_command = None
+
+                    # these are the flags defined by the gtest and benchmark
+                    # frameworks to list and filter test runs. We use them to
+                    # split each individual test into its own JobSpec, and
+                    # thus into its own process.
+ if 'benchmark' in target and target['benchmark']:
+ with open(os.devnull, 'w') as fnull:
+ tests = subprocess.check_output(
+ [binary, '--benchmark_list_tests'],
+ stderr=fnull)
+ for line in tests.split('\n'):
+ test = line.strip()
+ if not test: continue
+ cmdline = [binary,
+ '--benchmark_filter=%s$' % test
+ ] + target['args']
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='%s %s' %
+ (' '.join(cmdline), shortname_ext),
+ cpu_cost=cpu_cost,
+ timeout_seconds=target.get(
+ 'timeout_seconds',
+ _DEFAULT_TIMEOUT_SECONDS) *
+ timeout_scaling,
+ environ=env))
+ elif 'gtest' in target and target['gtest']:
+                        # here we parse the output of --gtest_list_tests to
+                        # build up a complete list of the tests contained in
+                        # a binary. For each test, we then add a job to run,
+                        # filtering for just that test.
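+                        # Example --gtest_list_tests output (illustrative):
+                        #   FooTest.
+                        #     DoesBar
+                        #     DoesBaz
+                        # yields 'FooTest.DoesBar' and 'FooTest.DoesBaz'.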
+ with open(os.devnull, 'w') as fnull:
+ tests = subprocess.check_output(
+ [binary, '--gtest_list_tests'], stderr=fnull)
+ base = None
+ for line in tests.split('\n'):
+ i = line.find('#')
+ if i >= 0: line = line[:i]
+ if not line: continue
+ if line[0] != ' ':
+ base = line.strip()
+ else:
+ assert base is not None
+ assert line[1] == ' '
+ test = base + line.strip()
+ cmdline = [binary,
+ '--gtest_filter=%s' % test
+ ] + target['args']
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='%s %s' %
+ (' '.join(cmdline), shortname_ext),
+ cpu_cost=cpu_cost,
+ timeout_seconds=target.get(
+ 'timeout_seconds',
+ _DEFAULT_TIMEOUT_SECONDS) *
+ timeout_scaling,
+ environ=env))
+ else:
+ cmdline = [binary] + target['args']
+ shortname = target.get(
+ 'shortname',
+ ' '.join(pipes.quote(arg) for arg in cmdline))
+ shortname += shortname_ext
+ out.append(
+ self.config.job_spec(
+ cmdline,
+ shortname=shortname,
+ cpu_cost=cpu_cost,
+ flaky=target.get('flaky', False),
+ timeout_seconds=target.get(
+ 'timeout_seconds',
+ _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
+ environ=env))
+ elif self.args.regex == '.*' or self.platform == 'windows':
+ print('\nWARNING: binary not found, skipping', binary)
+ return sorted(out)
+
+ def make_targets(self):
+ if self.platform == 'windows':
+ # don't build tools on windows just yet
+ return ['buildtests_%s' % self.make_target]
+ return [
+ 'buildtests_%s' % self.make_target,
+ 'tools_%s' % self.make_target, 'check_epollexclusive'
+ ]
+
+ def make_options(self):
+ return self._make_options
+
+ def pre_build_steps(self):
+ if self.platform == 'windows':
+ return [[
+ 'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
+ self._cmake_generator_option, self._cmake_arch_option
+ ]]
+ elif self._use_cmake:
+ return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
+ else:
+ return []
+
+ def build_steps(self):
+ return []
+
+ def post_tests_steps(self):
+ if self.platform == 'windows':
+ return []
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
+
+ def makefile_name(self):
+ if self._use_cmake:
+ return 'cmake/build/Makefile'
+ else:
+ return 'Makefile'
+
+ def _clang_make_options(self, version_suffix=''):
+ if self.args.config == 'ubsan':
+ return [
+ 'CC=clang%s' % version_suffix,
+ 'CXX=clang++%s' % version_suffix,
+ 'LD=clang++%s' % version_suffix,
+ 'LDXX=clang++%s' % version_suffix
+ ]
+
+ return [
+ 'CC=clang%s' % version_suffix,
+ 'CXX=clang++%s' % version_suffix,
+ 'LD=clang%s' % version_suffix,
+ 'LDXX=clang++%s' % version_suffix
+ ]
+
+ def _gcc_make_options(self, version_suffix):
+ return [
+ 'CC=gcc%s' % version_suffix,
+ 'CXX=g++%s' % version_suffix,
+ 'LD=gcc%s' % version_suffix,
+ 'LDXX=g++%s' % version_suffix
+ ]
+
+ def _compiler_options(self, use_docker, compiler):
+ """Returns docker distro and make options to use for given compiler."""
+ if not use_docker and not _is_use_docker_child():
+ _check_compiler(compiler, ['default'])
+
+ if compiler == 'gcc4.9' or compiler == 'default':
+ return ('jessie', [])
+ elif compiler == 'gcc5.3':
+ return ('ubuntu1604', [])
+ elif compiler == 'gcc7.4':
+ return ('ubuntu1804', [])
+ elif compiler == 'gcc8.3':
+ return ('buster', [])
+ elif compiler == 'gcc_musl':
+ return ('alpine', [])
+ elif compiler == 'clang3.4':
+ # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
+ return ('ubuntu1404', self._clang_make_options())
+ elif compiler == 'clang3.5':
+ return ('jessie', self._clang_make_options(version_suffix='-3.5'))
+ elif compiler == 'clang3.6':
+ return ('ubuntu1604',
+ self._clang_make_options(version_suffix='-3.6'))
+ elif compiler == 'clang3.7':
+ return ('ubuntu1604',
+ self._clang_make_options(version_suffix='-3.7'))
+ elif compiler == 'clang7.0':
+ # clang++-7.0 alias doesn't exist and there are no other clang versions
+ # installed.
+ return ('sanitizers_jessie', self._clang_make_options())
+ else:
+ raise Exception('Compiler %s not supported.' % compiler)
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/cxx_%s_%s' % (
+ self._docker_distro, _docker_arch_suffix(self.args.arch))
+
+ def __str__(self):
+ return self.make_target
+
+
+# This tests Node on grpc/grpc-node and will become the standard for Node testing
+class RemoteNodeLanguage(object):
+
+ def __init__(self):
+ self.platform = platform_string()
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ # Note: electron ABI only depends on major and minor version, so that's all
+ # we should specify in the compiler argument
+ _check_compiler(self.args.compiler, [
+ 'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
+ 'electron1.3', 'electron1.6'
+ ])
+ if self.args.compiler == 'default':
+ self.runtime = 'node'
+ self.node_version = '8'
+ else:
+ if self.args.compiler.startswith('electron'):
+ self.runtime = 'electron'
+ self.node_version = self.args.compiler[8:]
+ else:
+ self.runtime = 'node'
+ # Take off the word "node"
+ self.node_version = self.args.compiler[4:]
+
+ # TODO: update with Windows/electron scripts when available for grpc/grpc-node
+ def test_specs(self):
+ if self.platform == 'windows':
+ return [
+ self.config.job_spec(
+ ['tools\\run_tests\\helper_scripts\\run_node.bat'])
+ ]
+ else:
+ return [
+ self.config.job_spec(
+ ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
+ None,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return []
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return []
+
+ def post_tests_steps(self):
+ return []
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
+
+ def __str__(self):
+ return 'grpc-node'
+
+
+class PhpLanguage(object):
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+ self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+
+ def test_specs(self):
+ return [
+ self.config.job_spec(['src/php/bin/run_tests.sh'],
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return ['static_c', 'shared_c']
+
+ def make_options(self):
+ return self._make_options
+
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
+
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
+
+ def __str__(self):
+ return 'php'
+
+
+class Php7Language(object):
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+ self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+
+ def test_specs(self):
+ return [
+ self.config.job_spec(['src/php/bin/run_tests.sh'],
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return ['static_c', 'shared_c']
+
+ def make_options(self):
+ return self._make_options
+
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_php.sh']]
+
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
+
+ def __str__(self):
+ return 'php7'
+
+
+class PythonConfig(
+ collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
+ """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
+
+
+class PythonLanguage(object):
+
+ _TEST_SPECS_FILE = {
+ 'native': 'src/python/grpcio_tests/tests/tests.json',
+ 'gevent': 'src/python/grpcio_tests/tests/tests.json',
+ 'asyncio': 'src/python/grpcio_tests/tests_aio/tests.json',
+ }
+ _TEST_FOLDER = {
+ 'native': 'test',
+ 'gevent': 'test',
+ 'asyncio': 'test_aio',
+ }
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ self.pythons = self._get_pythons(self.args)
+
+ def test_specs(self):
+ # load list of known test suites
+ with open(self._TEST_SPECS_FILE[
+ self.args.iomgr_platform]) as tests_json_file:
+ tests_json = json.load(tests_json_file)
+ environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
+        # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers are
+        # not designed for non-native IO managers. They have a side effect
+        # that overrides threading settings in C-Core.
+ if args.iomgr_platform != 'native':
+ environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
+ return [
+ self.config.job_spec(
+ config.run,
+ timeout_seconds=5 * 60,
+ environ=dict(GRPC_PYTHON_TESTRUNNER_FILTER=str(suite_name),
+ **environment),
+ shortname='%s.%s.%s' %
+ (config.name, self._TEST_FOLDER[self.args.iomgr_platform],
+ suite_name),
+ ) for suite_name in tests_json for config in self.pythons
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return []
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return [config.build for config in self.pythons]
+
+ def post_tests_steps(self):
+ if self.config.build_config != 'gcov':
+ return []
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/python_%s_%s' % (
+ self._python_manager_name(), _docker_arch_suffix(self.args.arch))
+
+ def _python_manager_name(self):
+ """Choose the docker image to use based on python version."""
+ if self.args.compiler in [
+ 'python2.7', 'python3.5', 'python3.6', 'python3.7', 'python3.8'
+ ]:
+ return 'stretch_' + self.args.compiler[len('python'):]
+ elif self.args.compiler == 'python_alpine':
+ return 'alpine'
+ else:
+ return 'stretch_default'
+
+ def _get_pythons(self, args):
+ """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
+ if args.arch == 'x86':
+ bits = '32'
+ else:
+ bits = '64'
+
+ if os.name == 'nt':
+ shell = ['bash']
+ builder = [
+ os.path.abspath(
+ 'tools/run_tests/helper_scripts/build_python_msys2.sh')
+ ]
+ builder_prefix_arguments = ['MINGW{}'.format(bits)]
+ venv_relative_python = ['Scripts/python.exe']
+ toolchain = ['mingw32']
+ else:
+ shell = []
+ builder = [
+ os.path.abspath(
+ 'tools/run_tests/helper_scripts/build_python.sh')
+ ]
+ builder_prefix_arguments = []
+ venv_relative_python = ['bin/python']
+ toolchain = ['unix']
+
+ # Selects the corresponding testing mode.
+ # See src/python/grpcio_tests/commands.py for implementation details.
+ if args.iomgr_platform == 'native':
+ test_command = 'test_lite'
+ elif args.iomgr_platform == 'gevent':
+ test_command = 'test_gevent'
+ elif args.iomgr_platform == 'asyncio':
+ test_command = 'test_aio'
+ else:
+ raise ValueError('Unsupported IO Manager platform: %s' %
+ args.iomgr_platform)
+ runner = [
+ os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
+ ]
+
+ config_vars = _PythonConfigVars(shell, builder,
+ builder_prefix_arguments,
+ venv_relative_python, toolchain, runner,
+ test_command, args.iomgr_platform)
+ python27_config = _python_config_generator(name='py27',
+ major='2',
+ minor='7',
+ bits=bits,
+ config_vars=config_vars)
+ python35_config = _python_config_generator(name='py35',
+ major='3',
+ minor='5',
+ bits=bits,
+ config_vars=config_vars)
+ python36_config = _python_config_generator(name='py36',
+ major='3',
+ minor='6',
+ bits=bits,
+ config_vars=config_vars)
+ python37_config = _python_config_generator(name='py37',
+ major='3',
+ minor='7',
+ bits=bits,
+ config_vars=config_vars)
+ python38_config = _python_config_generator(name='py38',
+ major='3',
+ minor='8',
+ bits=bits,
+ config_vars=config_vars)
+ pypy27_config = _pypy_config_generator(name='pypy',
+ major='2',
+ config_vars=config_vars)
+ pypy32_config = _pypy_config_generator(name='pypy3',
+ major='3',
+ config_vars=config_vars)
+
+ if args.iomgr_platform == 'asyncio':
+ if args.compiler not in ('default', 'python3.6', 'python3.7',
+ 'python3.8'):
+ raise Exception(
+ 'Compiler %s not supported with IO Manager platform: %s' %
+ (args.compiler, args.iomgr_platform))
+
+ if args.compiler == 'default':
+ if os.name == 'nt':
+ return (python36_config,)
+ else:
+ if args.iomgr_platform == 'asyncio':
+ return (python36_config,)
+ elif os.uname()[0] == 'Darwin':
+ # NOTE(rbellevi): Testing takes significantly longer on
+ # MacOS, so we restrict the number of interpreter versions
+ # tested.
+ return (
+ python27_config,
+ python36_config,
+ python37_config,
+ )
+ else:
+ return (
+ python27_config,
+ python35_config,
+ python36_config,
+ python37_config,
+ )
+ elif args.compiler == 'python2.7':
+ return (python27_config,)
+ elif args.compiler == 'python3.5':
+ return (python35_config,)
+ elif args.compiler == 'python3.6':
+ return (python36_config,)
+ elif args.compiler == 'python3.7':
+ return (python37_config,)
+ elif args.compiler == 'python3.8':
+ return (python38_config,)
+ elif args.compiler == 'pypy':
+ return (pypy27_config,)
+ elif args.compiler == 'pypy3':
+ return (pypy32_config,)
+ elif args.compiler == 'python_alpine':
+ return (python27_config,)
+ elif args.compiler == 'all_the_cpythons':
+ return (
+ python27_config,
+ python35_config,
+ python36_config,
+ python37_config,
+ python38_config,
+ )
+ else:
+ raise Exception('Compiler %s not supported.' % args.compiler)
+
+ def __str__(self):
+ return 'python'
+
+
+class RubyLanguage(object):
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+
+ def test_specs(self):
+ tests = [
+ self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
+ timeout_seconds=10 * 60,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+ ]
+ tests.append(
+ self.config.job_spec(
+ ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
+ timeout_seconds=20 * 60,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ return tests
+
+ def pre_build_steps(self):
+ return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
+
+ def make_targets(self):
+ return []
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return [['tools/run_tests/helper_scripts/build_ruby.sh']]
+
+ def post_tests_steps(self):
+ return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
+ self.args.arch)
+
+ def __str__(self):
+ return 'ruby'
+
+
+class CSharpLanguage(object):
+
+ def __init__(self):
+ self.platform = platform_string()
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ if self.platform == 'windows':
+ _check_compiler(self.args.compiler, ['default', 'coreclr'])
+ _check_arch(self.args.arch, ['default'])
+ self._cmake_arch_option = 'x64'
+ else:
+ _check_compiler(self.args.compiler, ['default', 'coreclr'])
+ self._docker_distro = 'stretch'
+
+ def test_specs(self):
+ with open('src/csharp/tests.json') as f:
+ tests_by_assembly = json.load(f)
+
+ msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
+ nunit_args = ['--labels=All', '--noresult', '--workers=1']
+ assembly_subdir = 'bin/%s' % msbuild_config
+ assembly_extension = '.exe'
+
+ if self.args.compiler == 'coreclr':
+ assembly_subdir += '/netcoreapp2.1'
+ runtime_cmd = ['dotnet', 'exec']
+ assembly_extension = '.dll'
+ else:
+ assembly_subdir += '/net45'
+ if self.platform == 'windows':
+ runtime_cmd = []
+ elif self.platform == 'mac':
+ # mono before version 5.2 on MacOS defaults to 32bit runtime
+ runtime_cmd = ['mono', '--arch=64']
+ else:
+ runtime_cmd = ['mono']
+
+ specs = []
+ for assembly in six.iterkeys(tests_by_assembly):
+ assembly_file = 'src/csharp/%s/%s/%s%s' % (
+ assembly, assembly_subdir, assembly, assembly_extension)
+ if self.config.build_config != 'gcov' or self.platform != 'windows':
+ # normally, run each test as a separate process
+ for test in tests_by_assembly[assembly]:
+ cmdline = runtime_cmd + [assembly_file,
+ '--test=%s' % test] + nunit_args
+ specs.append(
+ self.config.job_spec(
+ cmdline,
+ shortname='csharp.%s' % test,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ else:
+ # For C# test coverage, run all tests from the same assembly at once
+ # using OpenCover.Console (only works on Windows).
+ cmdline = [
+ 'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
+ '-target:%s' % assembly_file, '-targetdir:src\\csharp',
+ '-targetargs:%s' % ' '.join(nunit_args),
+ '-filter:+[Grpc.Core]*', '-register:user',
+ '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
+ ]
+
+ # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
+ # to prevent problems with registering the profiler.
+ run_exclusive = 1000000
+ specs.append(
+ self.config.job_spec(cmdline,
+ shortname='csharp.coverage.%s' %
+ assembly,
+ cpu_cost=run_exclusive,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ return specs
+
+ def pre_build_steps(self):
+ if self.platform == 'windows':
+ return [[
+ 'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
+ self._cmake_arch_option
+ ]]
+ else:
+ return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
+
+ def make_targets(self):
+ return ['grpc_csharp_ext']
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ if self.platform == 'windows':
+ return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
+ else:
+ return [['tools/run_tests/helper_scripts/build_csharp.sh']]
+
+ def post_tests_steps(self):
+ if self.platform == 'windows':
+ return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
+ else:
+ return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
+
+ def makefile_name(self):
+ if self.platform == 'windows':
+ return 'cmake/build/%s/Makefile' % self._cmake_arch_option
+ else:
+ # no need to set x86 specific flags as run_tests.py
+ # currently forbids x86 C# builds on both Linux and MacOS.
+ return 'cmake/build/Makefile'
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/csharp_%s_%s' % (
+ self._docker_distro, _docker_arch_suffix(self.args.arch))
+
+ def __str__(self):
+ return 'csharp'
+
+
+class ObjCLanguage(object):
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+
+ def test_specs(self):
+ out = []
+ out.append(
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example_bazel.sh'],
+ timeout_seconds=10 * 60,
+ shortname='ios-buildtest-example-sample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'Sample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
+ 'FRAMEWORKS': 'NO'
+ }))
+ # Currently not supporting compiling as frameworks in Bazel
+ out.append(
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=20 * 60,
+ shortname='ios-buildtest-example-sample-frameworks',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'Sample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
+ 'FRAMEWORKS': 'YES'
+ }))
+ out.append(
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example.sh'],
+ timeout_seconds=20 * 60,
+                shortname='ios-buildtest-example-swiftsample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'SwiftSample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
+ }))
+ out.append(
+ self.config.job_spec(
+ ['src/objective-c/tests/build_one_example_bazel.sh'],
+ timeout_seconds=10 * 60,
+ shortname='ios-buildtest-example-tvOS-sample',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'tvOS-sample',
+ 'EXAMPLE_PATH': 'src/objective-c/examples/tvOS-sample',
+ 'FRAMEWORKS': 'NO'
+ }))
+ # Disabled due to #20258
+ # TODO (mxyan): Reenable this test when #20258 is resolved.
+ # out.append(
+ # self.config.job_spec(
+ # ['src/objective-c/tests/build_one_example_bazel.sh'],
+ # timeout_seconds=20 * 60,
+ # shortname='ios-buildtest-example-watchOS-sample',
+ # cpu_cost=1e6,
+ # environ={
+ # 'SCHEME': 'watchOS-sample-WatchKit-App',
+ # 'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
+ # 'FRAMEWORKS': 'NO'
+ # }))
+ out.append(
+ self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
+ timeout_seconds=60 * 60,
+ shortname='ios-test-plugintest',
+ cpu_cost=1e6,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ out.append(
+ self.config.job_spec(
+ ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
+ timeout_seconds=20 * 60,
+ shortname='ios-test-cfstream-tests',
+ cpu_cost=1e6,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ # TODO: replace with run_one_test_bazel.sh when Bazel-Xcode is stable
+ out.append(
+ self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
+ timeout_seconds=60 * 60,
+ shortname='ios-test-unittests',
+ cpu_cost=1e6,
+ environ={'SCHEME': 'UnitTests'}))
+ out.append(
+ self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
+ timeout_seconds=60 * 60,
+ shortname='ios-test-interoptests',
+ cpu_cost=1e6,
+ environ={'SCHEME': 'InteropTests'}))
+ out.append(
+ self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
+ timeout_seconds=60 * 60,
+ shortname='ios-test-cronettests',
+ cpu_cost=1e6,
+ environ={'SCHEME': 'CronetTests'}))
+ out.append(
+ self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
+ timeout_seconds=30 * 60,
+ shortname='ios-perf-test',
+ cpu_cost=1e6,
+ environ={'SCHEME': 'PerfTests'}))
+ out.append(
+ self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
+ timeout_seconds=30 * 60,
+ shortname='ios-perf-test-posix',
+ cpu_cost=1e6,
+ environ={'SCHEME': 'PerfTestsPosix'}))
+ out.append(
+ self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
+ timeout_seconds=30 * 60,
+ shortname='ios-cpp-test-cronet',
+ cpu_cost=1e6,
+ environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+ out.append(
+ self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
+ timeout_seconds=60 * 60,
+ shortname='mac-test-basictests',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'MacTests',
+ 'PLATFORM': 'macos'
+ }))
+ out.append(
+ self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
+ timeout_seconds=30 * 60,
+ shortname='tvos-test-basictests',
+ cpu_cost=1e6,
+ environ={
+ 'SCHEME': 'TvTests',
+ 'PLATFORM': 'tvos'
+ }))
+
+ return sorted(out)
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return []
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return []
+
+ def post_tests_steps(self):
+ return []
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return None
+
+ def __str__(self):
+ return 'objc'
+
+
+class Sanity(object):
+
+ def configure(self, config, args):
+ self.config = config
+ self.args = args
+ _check_compiler(self.args.compiler, ['default'])
+
+ def test_specs(self):
+ import yaml
+ with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
+ environ = {'TEST': 'true'}
+ if _is_use_docker_child():
+ environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
+ environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
+ # sanity tests run tools/bazel wrapper concurrently
+ # and that can result in a download/run race in the wrapper.
+ # under docker we already have the right version of bazel
+ # so we can just disable the wrapper.
+ environ['DISABLE_BAZEL_WRAPPER'] = 'true'
+ return [
+ self.config.job_spec(cmd['script'].split(),
+ timeout_seconds=30 * 60,
+ environ=environ,
+ cpu_cost=cmd.get('cpu_cost', 1))
+ for cmd in yaml.load(f)
+ ]
+
+ def pre_build_steps(self):
+ return []
+
+ def make_targets(self):
+ return ['run_dep_checks']
+
+ def make_options(self):
+ return []
+
+ def build_steps(self):
+ return []
+
+ def post_tests_steps(self):
+ return []
+
+ def makefile_name(self):
+ return 'Makefile'
+
+ def dockerfile_dir(self):
+ return 'tools/dockerfile/test/sanity'
+
+ def __str__(self):
+ return 'sanity'
+
+
+# different configurations we can run under
+with open('tools/run_tests/generated/configs.json') as f:
+ _CONFIGS = dict(
+ (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
+
+_LANGUAGES = {
+ 'c++': CLanguage('cxx', 'c++'),
+ 'c': CLanguage('c', 'c'),
+ 'grpc-node': RemoteNodeLanguage(),
+ 'php': PhpLanguage(),
+ 'php7': Php7Language(),
+ 'python': PythonLanguage(),
+ 'ruby': RubyLanguage(),
+ 'csharp': CSharpLanguage(),
+ 'objc': ObjCLanguage(),
+ 'sanity': Sanity()
+}
+
+_MSBUILD_CONFIG = {
+ 'dbg': 'Debug',
+ 'opt': 'Release',
+ 'gcov': 'Debug',
+}
+
+
+def _windows_arch_option(arch):
+ """Returns msbuild cmdline option for selected architecture."""
+ if arch == 'default' or arch == 'x86':
+ return '/p:Platform=Win32'
+ elif arch == 'x64':
+ return '/p:Platform=x64'
+ else:
+ print('Architecture %s not supported.' % arch)
+ sys.exit(1)
+
+
+def _check_arch_option(arch):
+ """Checks that architecture option is valid."""
+ if platform_string() == 'windows':
+ _windows_arch_option(arch)
+ elif platform_string() == 'linux':
+ # On linux, we need to be running under docker with the right architecture.
+ runtime_arch = platform.architecture()[0]
+ if arch == 'default':
+ return
+ elif runtime_arch == '64bit' and arch == 'x64':
+ return
+ elif runtime_arch == '32bit' and arch == 'x86':
+ return
+ else:
+ print(
+ 'Architecture %s does not match current runtime architecture.' %
+ arch)
+ sys.exit(1)
+ else:
+ if args.arch != 'default':
+ print('Architecture %s not supported on current platform.' %
+ args.arch)
+ sys.exit(1)
+
+
+def _docker_arch_suffix(arch):
+ """Returns suffix to dockerfile dir to use."""
+ if arch == 'default' or arch == 'x64':
+ return 'x64'
+ elif arch == 'x86':
+ return 'x86'
+ else:
+ print('Architecture %s not supported with current settings.' % arch)
+ sys.exit(1)
+
+
+def runs_per_test_type(arg_str):
+ """Auxiliary function to parse the "runs_per_test" flag.
+
+ Returns:
+ A positive integer or 0, the latter indicating an infinite number of
+ runs.
+
+ Raises:
+ argparse.ArgumentTypeError: Upon invalid input.
+ """
+ if arg_str == 'inf':
+ return 0
+ try:
+ n = int(arg_str)
+ if n <= 0: raise ValueError
+ return n
+ except:
+ msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
+ raise argparse.ArgumentTypeError(msg)
+
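+# e.g., runs_per_test_type('3') == 3 and runs_per_test_type('inf') == 0, while
+# 'foo' or '-1' raise argparse.ArgumentTypeError.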
+
+def percent_type(arg_str):
+ pct = float(arg_str)
+ if pct > 100 or pct < 0:
+ raise argparse.ArgumentTypeError(
+ "'%f' is not a valid percentage in the [0, 100] range" % pct)
+ return pct
+
+
+# This is math.isclose in python >= 3.5
+def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
+ return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+
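+# e.g., isclose(1.0, 1.0 + 1e-10) is True while isclose(1.0, 1.01) is False.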
+
+# parse command line
+argp = argparse.ArgumentParser(description='Run grpc tests.')
+argp.add_argument('-c',
+ '--config',
+ choices=sorted(_CONFIGS.keys()),
+ default='opt')
+argp.add_argument(
+ '-n',
+ '--runs_per_test',
+ default=1,
+ type=runs_per_test_type,
+ help='A positive integer or "inf". If "inf", all tests will run in an '
+ 'infinite loop. Especially useful in combination with "-f"')
+argp.add_argument('-r', '--regex', default='.*', type=str)
+argp.add_argument('--regex_exclude', default='', type=str)
+argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
+argp.add_argument('-s', '--slowdown', default=1.0, type=float)
+argp.add_argument('-p',
+ '--sample_percent',
+ default=100.0,
+ type=percent_type,
+ help='Run a random sample with that percentage of tests')
+argp.add_argument('-f',
+ '--forever',
+ default=False,
+ action='store_const',
+ const=True)
+argp.add_argument('-t',
+ '--travis',
+ default=False,
+ action='store_const',
+ const=True)
+argp.add_argument('--newline_on_success',
+ default=False,
+ action='store_const',
+ const=True)
+argp.add_argument('-l',
+ '--language',
+ choices=sorted(_LANGUAGES.keys()),
+ nargs='+',
+ required=True)
+argp.add_argument('-S',
+ '--stop_on_failure',
+ default=False,
+ action='store_const',
+ const=True)
+argp.add_argument('--use_docker',
+ default=False,
+ action='store_const',
+ const=True,
+                  help='Run all the tests under docker. That provides ' +
+                  'additional isolation and removes the need to install ' +
+                  'language-specific prerequisites. Only available on Linux.')
+argp.add_argument(
+ '--allow_flakes',
+ default=False,
+ action='store_const',
+ const=True,
+ help=
+ 'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+ '--arch',
+ choices=['default', 'x86', 'x64'],
+ default='default',
+ help=
+ 'Selects architecture to target. For some platforms "default" is the only supported choice.'
+)
+argp.add_argument(
+ '--compiler',
+ choices=[
+ 'default', 'gcc4.9', 'gcc5.3', 'gcc7.4', 'gcc8.3', 'gcc_musl',
+ 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0', 'python2.7',
+ 'python3.5', 'python3.6', 'python3.7', 'python3.8', 'pypy', 'pypy3',
+ 'python_alpine', 'all_the_cpythons', 'electron1.3', 'electron1.6',
+ 'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
+ ],
+ default='default',
+ help=
+ 'Selects compiler to use. Allowed values depend on the platform and language.'
+)
+argp.add_argument('--iomgr_platform',
+ choices=['native', 'uv', 'gevent', 'asyncio'],
+ default='native',
+ help='Selects iomgr platform to build on')
+argp.add_argument('--build_only',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Perform all the build steps but don\'t run any tests.')
+argp.add_argument('--measure_cpu_costs',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Measure the cpu costs of tests')
+argp.add_argument(
+ '--update_submodules',
+ default=[],
+ nargs='*',
+ help=
+ 'Update some submodules before building. If any are updated, also run generate_projects. '
+ +
+ 'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
+)
+argp.add_argument('-a', '--antagonists', default=0, type=int)
+argp.add_argument('-x',
+ '--xml_report',
+ default=None,
+ type=str,
+ help='Generates a JUnit-compatible XML report')
+argp.add_argument('--report_suite_name',
+ default='tests',
+ type=str,
+ help='Test suite name to use in generated JUnit XML report')
+argp.add_argument(
+ '--report_multi_target',
+ default=False,
+ const=True,
+ action='store_const',
+ help='Generate separate XML report for each test job (Looks better in UIs).'
+)
+argp.add_argument(
+ '--quiet_success',
+ default=False,
+ action='store_const',
+ const=True,
+ help=
+ 'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+ + 'Useful when running many iterations of each test (argument -n).')
+argp.add_argument(
+ '--force_default_poller',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Don\'t try to iterate over many polling strategies when they exist')
+argp.add_argument(
+ '--force_use_pollers',
+ default=None,
+ type=str,
+ help='Only use the specified comma-delimited list of polling engines. '
+ 'Example: --force_use_pollers epoll1,poll '
+ ' (This flag has no effect if --force_default_poller flag is also used)')
+argp.add_argument('--max_time',
+ default=-1,
+ type=int,
+ help='Maximum test runtime in seconds')
+argp.add_argument('--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
+argp.add_argument(
+ '--auto_set_flakes',
+ default=False,
+ const=True,
+ action='store_const',
+ help=
+ 'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
+)
+args = argp.parse_args()
+
+flaky_tests = set()
+shortname_to_cpu = {}
+if args.auto_set_flakes:
+ try:
+ for test in get_bqtest_data():
+ if test.flaky: flaky_tests.add(test.name)
+ if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
+ except:
+ print("Unexpected error getting flaky tests: %s" %
+ traceback.format_exc())
+
+if args.force_default_poller:
+ _POLLING_STRATEGIES = {}
+elif args.force_use_pollers:
+ _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
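+# e.g., '--force_use_pollers epoll1,poll' on linux results in
+# _POLLING_STRATEGIES == {'linux': ['epoll1', 'poll'], 'mac': ['poll']}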
+
+jobset.measure_cpu_costs = args.measure_cpu_costs
+
+# update submodules if necessary
+need_to_regenerate_projects = False
+for spec in args.update_submodules:
+ spec = spec.split(':', 1)
+ if len(spec) == 1:
+ submodule = spec[0]
+ branch = 'master'
+ elif len(spec) == 2:
+ submodule = spec[0]
+ branch = spec[1]
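+    # e.g., 'protobuf:v3.12.x' (name and branch illustrative) selects
+    # submodule 'protobuf' and branch 'v3.12.x'; a bare 'protobuf'
+    # defaults to branch 'master'.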
+ cwd = 'third_party/%s' % submodule
+
+ def git(cmd, cwd=cwd):
+ print('in %s: git %s' % (cwd, cmd))
+ run_shell_command('git %s' % cmd, cwd=cwd)
+
+ git('fetch')
+ git('checkout %s' % branch)
+ git('pull origin %s' % branch)
+ if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
+ need_to_regenerate_projects = True
+if need_to_regenerate_projects:
+ if jobset.platform_string() == 'linux':
+ run_shell_command('tools/buildgen/generate_projects.sh')
+ else:
+ print(
+ 'WARNING: may need to regenerate projects, but since we are not on')
+ print(
+ ' Linux this step is being skipped. Compilation MAY fail.')
+
+# grab config
+run_config = _CONFIGS[args.config]
+build_config = run_config.build_config
+
+if args.travis:
+ _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
+
+languages = set(_LANGUAGES[l] for l in args.language)
+for l in languages:
+ l.configure(run_config, args)
+
+language_make_options = []
+if any(language.make_options() for language in languages):
+ if not 'gcov' in args.config and len(languages) != 1:
+ print(
+ 'languages with custom make options cannot be built simultaneously with other languages'
+ )
+ sys.exit(1)
+ else:
+ # Combining make options is not clean and just happens to work. It allows C & C++ to build
+ # together, and is only used under gcov. All other configs should build languages individually.
+ language_make_options = list(
+ set([
+ make_option for lang in languages
+ for make_option in lang.make_options()
+ ]))
+
+if args.use_docker:
+ if not args.travis:
+ print('Seen --use_docker flag, will run tests under docker.')
+ print('')
+ print(
+ 'IMPORTANT: The changes you are testing need to be locally committed'
+ )
+ print(
+ 'because only the committed changes in the current branch will be')
+ print('copied to the docker environment.')
+ time.sleep(5)
+
+ dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
+ if len(dockerfile_dirs) > 1:
+ print('Languages to be tested require running under different docker '
+ 'images.')
+ sys.exit(1)
+ else:
+ dockerfile_dir = next(iter(dockerfile_dirs))
+
+    child_argv = [arg for arg in sys.argv if arg != '--use_docker']
+ run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
+ child_argv[1:])
+
+ env = os.environ.copy()
+ env['RUN_TESTS_COMMAND'] = run_tests_cmd
+ env['DOCKERFILE_DIR'] = dockerfile_dir
+ env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
+ if args.xml_report:
+ env['XML_REPORT'] = args.xml_report
+ if not args.travis:
+ env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
+
+ subprocess.check_call(
+ 'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
+ shell=True,
+ env=env)
+ sys.exit(0)
+
+_check_arch_option(args.arch)
+
+
+def make_jobspec(cfg, targets, makefile='Makefile'):
+ if platform_string() == 'windows':
+ return [
+ jobset.JobSpec([
+ 'cmake', '--build', '.', '--target',
+ '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
+ ],
+ cwd=os.path.dirname(makefile),
+ timeout_seconds=None) for target in targets
+ ]
+ else:
+ if targets and makefile.startswith('cmake/build/'):
+ # With cmake, we've passed all the build configuration in the pre-build step already
+ return [
+ jobset.JobSpec(
+ [os.getenv('MAKE', 'make'), '-j',
+ '%d' % args.jobs] + targets,
+ cwd='cmake/build',
+ timeout_seconds=None)
+ ]
+ if targets:
+ return [
+ jobset.JobSpec(
+ [
+ os.getenv('MAKE', 'make'), '-f', makefile, '-j',
+ '%d' % args.jobs,
+ 'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
+ args.slowdown,
+ 'CONFIG=%s' % cfg, 'Q='
+ ] + language_make_options +
+ ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
+ timeout_seconds=None)
+ ]
+ else:
+ return []
+
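+# Illustrative sketch only (not part of the build): on Linux with
+# args.jobs == 8 and a cmake-generated makefile, a call like
+# make_jobspec('opt', ['grpc_cli'], 'cmake/build/Makefile') -- 'grpc_cli'
+# being a hypothetical target here -- would yield roughly
+# ['make', '-j', '8', 'grpc_cli'] run from cwd='cmake/build'.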
+
+make_targets = {}
+for l in languages:
+ makefile = l.makefile_name()
+ make_targets[makefile] = make_targets.get(makefile, set()).union(
+ set(l.make_targets()))
+
+
+def build_step_environ(cfg):
+ environ = {'CONFIG': cfg}
+ msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
+ if msbuild_cfg:
+ environ['MSBUILD_CONFIG'] = msbuild_cfg
+ return environ
+
+
+build_steps = list(
+ set(
+ jobset.JobSpec(cmdline,
+ environ=build_step_environ(build_config),
+ timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
+ flake_retries=2)
+ for l in languages
+ for cmdline in l.pre_build_steps()))
+if make_targets:
+ make_commands = itertools.chain.from_iterable(
+ make_jobspec(build_config, list(targets), makefile)
+ for (makefile, targets) in make_targets.items())
+ build_steps.extend(set(make_commands))
+build_steps.extend(
+ set(
+ jobset.JobSpec(cmdline,
+ environ=build_step_environ(build_config),
+ timeout_seconds=None)
+ for l in languages
+ for cmdline in l.build_steps()))
+
+post_tests_steps = list(
+ set(
+ jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
+ for l in languages
+ for cmdline in l.post_tests_steps()))
+runs_per_test = args.runs_per_test
+forever = args.forever
+
+
+def _shut_down_legacy_server(legacy_server_port):
+ try:
+ version = int(
+ urllib.request.urlopen('http://localhost:%d/version_number' %
+ legacy_server_port,
+ timeout=10).read())
+ except Exception:
+ # The legacy server is not running; nothing to shut down.
+ pass
+ else:
+ urllib.request.urlopen('http://localhost:%d/quitquitquit' %
+ legacy_server_port).read()
+
+
+def _calculate_num_runs_failures(list_of_results):
+ """Calculate number of runs and failures for a particular test.
+
+ Args:
+ list_of_results: (List) of JobResult object.
+ Returns:
+ A tuple of total number of runs and failures.
+ """
+ num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
+ num_failures = 0
+ for jobresult in list_of_results:
+ if jobresult.retries > 0:
+ num_runs += jobresult.retries
+ if jobresult.num_failures > 0:
+ num_failures += jobresult.num_failures
+ return num_runs, num_failures
+
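+# Worked example (illustrative only): three JobResults for one test, where a
+# single result has retries == 2 and num_failures == 1, give num_runs == 5
+# and num_failures == 1, which is reported as a flake further below.
+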
+
+# _build_and_run results
+class BuildAndRunError(object):
+
+ BUILD = object()
+ TEST = object()
+ POST_TEST = object()
+
+
+def _has_epollexclusive():
+ binary = 'bins/%s/check_epollexclusive' % args.config
+ if not os.path.exists(binary):
+ return False
+ try:
+ subprocess.check_call(binary)
+ return True
+ except subprocess.CalledProcessError:
+ return False
+ except OSError:
+ # The check_epollexclusive binary only exists for C-based builds on
+ # non-Windows platforms.
+ return False
+
+
+# returns a list of things that failed (or an empty list on success)
+def _build_and_run(check_cancelled,
+ newline_on_success,
+ xml_report=None,
+ build_only=False):
+ """Do one pass of building & running tests."""
+ # build latest sequentially
+ num_failures, resultset = jobset.run(build_steps,
+ maxjobs=1,
+ stop_on_failure=True,
+ newline_on_success=newline_on_success,
+ travis=args.travis)
+ if num_failures:
+ return [BuildAndRunError.BUILD]
+
+ if build_only:
+ if xml_report:
+ report_utils.render_junit_xml_report(
+ resultset, xml_report, suite_name=args.report_suite_name)
+ return []
+
+ if not args.travis and not _has_epollexclusive() and platform_string(
+ ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
+ platform_string()]:
+ print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
+ _POLLING_STRATEGIES[platform_string()].remove('epollex')
+
+ # start antagonists
+ antagonists = [
+ subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
+ for _ in range(0, args.antagonists)
+ ]
+ start_port_server.start_port_server()
+ resultset = None
+ num_test_failures = 0
+ try:
+ infinite_runs = runs_per_test == 0
+ one_run = set(spec for language in languages
+ for spec in language.test_specs()
+ if (re.search(args.regex, spec.shortname) and
+ (args.regex_exclude == '' or
+ not re.search(args.regex_exclude, spec.shortname))))
+ # When running on travis, we want our test runs to be as similar as possible
+ # for reproducibility purposes.
+ if args.travis and args.max_time <= 0:
+ massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
+ else:
+ # whereas otherwise, we want to shuffle things up to give all tests a
+ # chance to run.
+ massaged_one_run = list(
+ one_run) # random.sample needs an indexable seq.
+ num_jobs = len(massaged_one_run)
+ # for a random sample, get as many as indicated by the 'sample_percent'
+ # argument. By default this arg is 100, resulting in a shuffle of all
+ # jobs.
+ sample_size = int(num_jobs * args.sample_percent / 100.0)
+ massaged_one_run = random.sample(massaged_one_run, sample_size)
+ if not isclose(args.sample_percent, 100.0):
+ assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
+ print("Running %d tests out of %d (~%d%%)" %
+ (sample_size, num_jobs, args.sample_percent))
+ if infinite_runs:
+ assert len(massaged_one_run
+ ) > 0, 'Must have at least one test for a -n inf run'
+ runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
+ else itertools.repeat(massaged_one_run, runs_per_test))
+ all_runs = itertools.chain.from_iterable(runs_sequence)
+
+ if args.quiet_success:
+ jobset.message(
+ 'START',
+ 'Running tests quietly, only failing tests will be reported',
+ do_newline=True)
+ num_test_failures, resultset = jobset.run(
+ all_runs,
+ check_cancelled,
+ newline_on_success=newline_on_success,
+ travis=args.travis,
+ maxjobs=args.jobs,
+ maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
+ stop_on_failure=args.stop_on_failure,
+ quiet_success=args.quiet_success,
+ max_time=args.max_time)
+ if resultset:
+ for k, v in sorted(resultset.items()):
+ num_runs, num_failures = _calculate_num_runs_failures(v)
+ if num_failures > 0:
+ if num_failures == num_runs: # what about infinite_runs???
+ jobset.message('FAILED', k, do_newline=True)
+ else:
+ jobset.message('FLAKE',
+ '%s [%d/%d runs flaked]' %
+ (k, num_failures, num_runs),
+ do_newline=True)
+ finally:
+ for antagonist in antagonists:
+ antagonist.kill()
+ if args.bq_result_table and resultset:
+ upload_extra_fields = {
+ 'compiler': args.compiler,
+ 'config': args.config,
+ 'iomgr_platform': args.iomgr_platform,
+ 'language': args.language[
+ 0
+ ], # args.language is a list but will always have one element when uploading to BQ is enabled.
+ 'platform': platform_string()
+ }
+ try:
+ upload_results_to_bq(resultset, args.bq_result_table,
+ upload_extra_fields)
+ except NameError as e:
+ logging.warning(
+ e) # It's fine to ignore since this is not critical
+ if xml_report and resultset:
+ report_utils.render_junit_xml_report(
+ resultset,
+ xml_report,
+ suite_name=args.report_suite_name,
+ multi_target=args.report_multi_target)
+
+ number_failures, _ = jobset.run(post_tests_steps,
+ maxjobs=1,
+ stop_on_failure=False,
+ newline_on_success=newline_on_success,
+ travis=args.travis)
+
+ out = []
+ if number_failures:
+ out.append(BuildAndRunError.POST_TEST)
+ if num_test_failures:
+ out.append(BuildAndRunError.TEST)
+
+ return out
+
+
+if forever:
+ success = True
+ while True:
+ dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
+ initial_time = dw.most_recent_change()
+ have_files_changed = lambda: dw.most_recent_change() != initial_time
+ previous_success = success
+ errors = _build_and_run(check_cancelled=have_files_changed,
+ newline_on_success=False,
+ build_only=args.build_only)
+ success = not errors
+ if not previous_success and success:
+ jobset.message('SUCCESS',
+ 'All tests are now passing properly',
+ do_newline=True)
+ jobset.message('IDLE', 'No change detected')
+ while not have_files_changed():
+ time.sleep(1)
+else:
+ errors = _build_and_run(check_cancelled=lambda: False,
+ newline_on_success=args.newline_on_success,
+ xml_report=args.xml_report,
+ build_only=args.build_only)
+ if not errors:
+ jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+ else:
+ jobset.message('FAILED', 'Some tests failed', do_newline=True)
+ exit_code = 0
+ if BuildAndRunError.BUILD in errors:
+ exit_code |= 1
+ if BuildAndRunError.TEST in errors:
+ exit_code |= 2
+ if BuildAndRunError.POST_TEST in errors:
+ exit_code |= 4
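+ # For example, an exit code of 6 (2 | 4) means tests failed and a
+ # post-test step failed, while the build itself succeeded.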
+ sys.exit(exit_code)
diff --git a/grpc/tools/run_tests/run_tests_matrix.py b/grpc/tools/run_tests/run_tests_matrix.py
new file mode 100755
index 00000000..730bae8a
--- /dev/null
+++ b/grpc/tools/run_tests/run_tests_matrix.py
@@ -0,0 +1,577 @@
+#!/usr/bin/env python
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Run test matrix."""
+
+from __future__ import print_function
+
+import argparse
+import multiprocessing
+import os
+import sys
+
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+from python_utils.filter_pull_request_tests import filter_tests
+
+_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+os.chdir(_ROOT)
+
+_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60
+
+# C/C++ tests can take a long time
+_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60
+
+# Set a high timeout for ObjC so Cocoapods has time to install pods
+_OBJC_RUNTESTS_TIMEOUT = 90 * 60
+
+# Number of jobs assigned to each run_tests.py instance
+_DEFAULT_INNER_JOBS = 2
+
+# Name of the top-level umbrella report that includes all the run_tests.py invocations
+# Note that the starting letter 't' matters so that the targets are listed AFTER
+# the per-test breakdown items that start with 'run_tests/' (it is more readable that way)
+_MATRIX_REPORT_NAME = 'toplevel_run_tests_invocations'
+
+
+def _safe_report_name(name):
+ """Reports with '+' in target name won't show correctly in ResultStore"""
+ return name.replace('+', 'p')
+
+
+def _report_filename(name):
+ """Generates report file name with directory structure that leads to better presentation by internal CI"""
+ # 'sponge_log.xml' suffix must be there for results to get recognized by kokoro.
+ return '%s/%s' % (_safe_report_name(name), 'sponge_log.xml')
+
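+# Illustrative example: _report_filename('c++_linux_dbg_native') returns
+# 'cpp_linux_dbg_native/sponge_log.xml', since '+' is mapped to 'p' first.
+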
+
+def _matrix_job_logfilename(shortname_for_multi_target):
+ """Generate location for log file that will match the sponge_log.xml from the top-level matrix report."""
+ # 'sponge_log.log' suffix must be there for log to get recognized as "target log"
+ # for the corresponding 'sponge_log.xml' report.
+ # the shortname_for_multi_target component must be set to match the sponge_log.xml location
+ # because the top-level render_junit_xml_report is called with multi_target=True
+ return '%s/%s/%s' % (_MATRIX_REPORT_NAME, shortname_for_multi_target,
+ 'sponge_log.log')
+
+
+def _docker_jobspec(name,
+ runtests_args=[],
+ runtests_envs={},
+ inner_jobs=_DEFAULT_INNER_JOBS,
+ timeout_seconds=None):
+ """Run a single instance of run_tests.py in a docker container"""
+ if not timeout_seconds:
+ timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+ shortname = 'run_tests_%s' % name
+ test_job = jobset.JobSpec(cmdline=[
+ 'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t', '-j',
+ str(inner_jobs), '-x',
+ 'run_tests/%s' % _report_filename(name), '--report_suite_name',
+ '%s' % _safe_report_name(name)
+ ] + runtests_args,
+ environ=runtests_envs,
+ shortname=shortname,
+ timeout_seconds=timeout_seconds,
+ logfilename=_matrix_job_logfilename(shortname))
+ return test_job
+
+
+def _workspace_jobspec(name,
+ runtests_args=[],
+ workspace_name=None,
+ runtests_envs={},
+ inner_jobs=_DEFAULT_INNER_JOBS,
+ timeout_seconds=None):
+ """Run a single instance of run_tests.py in a separate workspace"""
+ if not workspace_name:
+ workspace_name = 'workspace_%s' % name
+ if not timeout_seconds:
+ timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+ shortname = 'run_tests_%s' % name
+ env = {'WORKSPACE_NAME': workspace_name}
+ env.update(runtests_envs)
+ test_job = jobset.JobSpec(cmdline=[
+ 'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
+ '-t', '-j',
+ str(inner_jobs), '-x',
+ '../run_tests/%s' % _report_filename(name), '--report_suite_name',
+ '%s' % _safe_report_name(name)
+ ] + runtests_args,
+ environ=env,
+ shortname=shortname,
+ timeout_seconds=timeout_seconds,
+ logfilename=_matrix_job_logfilename(shortname))
+ return test_job
+
+
+def _generate_jobs(languages,
+ configs,
+ platforms,
+ iomgr_platforms=['native'],
+ arch=None,
+ compiler=None,
+ labels=[],
+ extra_args=[],
+ extra_envs={},
+ inner_jobs=_DEFAULT_INNER_JOBS,
+ timeout_seconds=None):
+ result = []
+ for language in languages:
+ for platform in platforms:
+ for iomgr_platform in iomgr_platforms:
+ for config in configs:
+ name = '%s_%s_%s_%s' % (language, platform, config,
+ iomgr_platform)
+ runtests_args = [
+ '-l', language, '-c', config, '--iomgr_platform',
+ iomgr_platform
+ ]
+ if arch or compiler:
+ name += '_%s_%s' % (arch, compiler)
+ runtests_args += [
+ '--arch', arch, '--compiler', compiler
+ ]
+ if '--build_only' in extra_args:
+ name += '_buildonly'
+ for extra_env in extra_envs:
+ name += '_%s_%s' % (extra_env, extra_envs[extra_env])
+
+ runtests_args += extra_args
+ if platform == 'linux':
+ job = _docker_jobspec(name=name,
+ runtests_args=runtests_args,
+ runtests_envs=extra_envs,
+ inner_jobs=inner_jobs,
+ timeout_seconds=timeout_seconds)
+ else:
+ job = _workspace_jobspec(
+ name=name,
+ runtests_args=runtests_args,
+ runtests_envs=extra_envs,
+ inner_jobs=inner_jobs,
+ timeout_seconds=timeout_seconds)
+
+ job.labels = [platform, config, language, iomgr_platform
+ ] + labels
+ result.append(job)
+ return result
+
+
+def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
+ test_jobs = []
+ # sanity tests
+ test_jobs += _generate_jobs(languages=['sanity'],
+ configs=['dbg'],
+ platforms=['linux'],
+ labels=['basictests'],
+ extra_args=extra_args +
+ ['--report_multi_target'],
+ inner_jobs=inner_jobs)
+
+ # supported on linux only
+ test_jobs += _generate_jobs(languages=['php7'],
+ configs=['dbg', 'opt'],
+ platforms=['linux'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args +
+ ['--report_multi_target'],
+ inner_jobs=inner_jobs)
+
+ # supported on all platforms.
+ test_jobs += _generate_jobs(
+ languages=['c'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos', 'windows'],
+ labels=['basictests', 'corelang'],
+ extra_args=
+ extra_args, # don't use multi_target report because C has too many test cases
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # C# tests on .NET desktop/mono
+ test_jobs += _generate_jobs(languages=['csharp'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos', 'windows'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args +
+ ['--report_multi_target'],
+ inner_jobs=inner_jobs)
+ # C# tests on .NET core
+ test_jobs += _generate_jobs(languages=['csharp'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos', 'windows'],
+ arch='default',
+ compiler='coreclr',
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args +
+ ['--report_multi_target'],
+ inner_jobs=inner_jobs)
+
+ test_jobs += _generate_jobs(languages=['python'],
+ configs=['opt'],
+ platforms=['linux', 'macos', 'windows'],
+ iomgr_platforms=['native', 'gevent', 'asyncio'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args +
+ ['--report_multi_target'],
+ inner_jobs=inner_jobs)
+
+ # supported on linux and mac.
+ test_jobs += _generate_jobs(
+ languages=['c++'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos'],
+ labels=['basictests', 'corelang'],
+ extra_args=
+ extra_args, # don't use multi_target report because C++ has too many test cases
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ test_jobs += _generate_jobs(languages=['grpc-node', 'ruby', 'php'],
+ configs=['dbg', 'opt'],
+ platforms=['linux', 'macos'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args +
+ ['--report_multi_target'],
+ inner_jobs=inner_jobs)
+
+ # supported on mac only.
+ test_jobs += _generate_jobs(languages=['objc'],
+ configs=['opt'],
+ platforms=['macos'],
+ labels=['basictests', 'multilang'],
+ extra_args=extra_args +
+ ['--report_multi_target'],
+ inner_jobs=inner_jobs,
+ timeout_seconds=_OBJC_RUNTESTS_TIMEOUT)
+
+ return test_jobs
+
+
+def _create_portability_test_jobs(extra_args=[],
+ inner_jobs=_DEFAULT_INNER_JOBS):
+ test_jobs = []
+ # portability C x86
+ test_jobs += _generate_jobs(languages=['c'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='x86',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # portability C and C++ on x64
+ for compiler in [
+ 'gcc4.9', 'gcc5.3', 'gcc7.4', 'gcc8.3', 'gcc_musl', 'clang3.5',
+ 'clang3.6', 'clang3.7', 'clang7.0'
+ ]:
+ test_jobs += _generate_jobs(languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='x64',
+ compiler=compiler,
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # portability C on Windows 64-bit (x86 is the default)
+ test_jobs += _generate_jobs(languages=['c'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='x64',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ inner_jobs=inner_jobs)
+
+ # portability C++ on Windows
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ test_jobs += _generate_jobs(languages=['c++'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='default',
+ compiler='default',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # portability C and C++ on Windows using VS2017 (build only)
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ test_jobs += _generate_jobs(languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['windows'],
+ arch='x64',
+ compiler='cmake_vs2017',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # C and C++ with the c-ares DNS resolver on Linux
+ test_jobs += _generate_jobs(languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # C and C++ with no-exceptions on Linux
+ test_jobs += _generate_jobs(languages=['c', 'c++'],
+ configs=['noexcept'],
+ platforms=['linux'],
+ labels=['portability', 'corelang'],
+ extra_args=extra_args,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ # TODO(zyc): Turn on this test after adding c-ares support on windows.
+ # C with the c-ares DNS resolver on Windows
+ # test_jobs += _generate_jobs(languages=['c'],
+ # configs=['dbg'], platforms=['windows'],
+ # labels=['portability', 'corelang'],
+ # extra_args=extra_args,
+ # extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
+
+ # C and C++ build with cmake on Linux
+ # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+ # to make sure it's buildable at least.
+ test_jobs += _generate_jobs(languages=['c', 'c++'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='default',
+ compiler='cmake',
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs)
+
+ test_jobs += _generate_jobs(languages=['python'],
+ configs=['dbg'],
+ platforms=['linux'],
+ arch='default',
+ compiler='python_alpine',
+ labels=['portability', 'multilang'],
+ extra_args=extra_args +
+ ['--report_multi_target'],
+ inner_jobs=inner_jobs)
+
+ # TODO(jtattermusch): a large portion of the libuv tests is failing,
+ # which can end up killing the kokoro job due to gigabytes of error logs
+ # generated. Remove the --build_only flag
+ # once https://github.com/grpc/grpc/issues/17556 is fixed.
+ test_jobs += _generate_jobs(languages=['c'],
+ configs=['dbg'],
+ platforms=['linux'],
+ iomgr_platforms=['uv'],
+ labels=['portability', 'corelang'],
+ extra_args=extra_args + ['--build_only'],
+ inner_jobs=inner_jobs,
+ timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+
+ return test_jobs
+
+
+def _allowed_labels():
+ """Returns a list of existing job labels."""
+ all_labels = set()
+ for job in _create_test_jobs() + _create_portability_test_jobs():
+ for label in job.labels:
+ all_labels.add(label)
+ return sorted(all_labels)
+
+
+def _runs_per_test_type(arg_str):
+ """Auxiliary function to parse the "runs_per_test" flag."""
+ try:
+ n = int(arg_str)
+ if n <= 0: raise ValueError
+ return n
+ except ValueError:
+ msg = '\'{}\' is not a positive integer'.format(arg_str)
+ raise argparse.ArgumentTypeError(msg)
+
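+# Illustrative only: _runs_per_test_type('3') returns 3, while '0' or 'abc'
+# raise argparse.ArgumentTypeError.
+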
+
+if __name__ == "__main__":
+ argp = argparse.ArgumentParser(
+ description='Run a matrix of run_tests.py tests.')
+ argp.add_argument('-j',
+ '--jobs',
+ default=multiprocessing.cpu_count() // _DEFAULT_INNER_JOBS,
+ type=int,
+ help='Number of concurrent run_tests.py instances.')
+ argp.add_argument('-f',
+ '--filter',
+ choices=_allowed_labels(),
+ nargs='+',
+ default=[],
+ help='Filter targets to run by label with AND semantics.')
+ argp.add_argument('--exclude',
+ choices=_allowed_labels(),
+ nargs='+',
+ default=[],
+ help='Exclude targets with any of given labels.')
+ argp.add_argument('--build_only',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Pass --build_only flag to run_tests.py instances.')
+ argp.add_argument(
+ '--force_default_poller',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Pass --force_default_poller to run_tests.py instances.')
+ argp.add_argument('--dry_run',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Only print what would be run.')
+ argp.add_argument(
+ '--filter_pr_tests',
+ default=False,
+ action='store_const',
+ const=True,
+ help='Filters out tests irrelevant to pull request changes.')
+ argp.add_argument(
+ '--base_branch',
+ default='origin/master',
+ type=str,
+ help='Branch that pull request is requesting to merge into')
+ argp.add_argument('--inner_jobs',
+ default=_DEFAULT_INNER_JOBS,
+ type=int,
+ help='Number of jobs in each run_tests.py instance')
+ argp.add_argument(
+ '-n',
+ '--runs_per_test',
+ default=1,
+ type=_runs_per_test_type,
+ help='How many times to run each test. >1 runs implies ' +
+ 'omitting passing tests from the output & reports.')
+ argp.add_argument('--max_time',
+ default=-1,
+ type=int,
+ help='Maximum amount of time to run tests for ' +
+ '(other tests will be skipped)')
+ argp.add_argument(
+ '--internal_ci',
+ default=False,
+ action='store_const',
+ const=True,
+ help=
+ '(Deprecated, has no effect) Put reports into subdirectories to improve presentation of '
+ 'results by Kokoro.')
+ argp.add_argument('--bq_result_table',
+ default='',
+ type=str,
+ nargs='?',
+ help='Upload test results to a specified BQ table.')
+ argp.add_argument('--extra_args',
+ default='',
+ type=str,
+ nargs=argparse.REMAINDER,
+ help='Extra test args passed to each sub-script.')
+ args = argp.parse_args()
+
+ extra_args = []
+ if args.build_only:
+ extra_args.append('--build_only')
+ if args.force_default_poller:
+ extra_args.append('--force_default_poller')
+ if args.runs_per_test > 1:
+ extra_args.append('-n')
+ extra_args.append('%s' % args.runs_per_test)
+ extra_args.append('--quiet_success')
+ if args.max_time > 0:
+ extra_args.extend(('--max_time', '%d' % args.max_time))
+ if args.bq_result_table:
+ extra_args.append('--bq_result_table')
+ extra_args.append('%s' % args.bq_result_table)
+ extra_args.append('--measure_cpu_costs')
+ if args.extra_args:
+ extra_args.extend(args.extra_args)
+
+ all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
+ _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
+
+ jobs = []
+ for job in all_jobs:
+ if not args.filter or all(
+ label in job.labels for label in args.filter):
+ if not any(exclude_label in job.labels
+ for exclude_label in args.exclude):
+ jobs.append(job)
+
+ if not jobs:
+ jobset.message('FAILED',
+ 'No test suites match given criteria.',
+ do_newline=True)
+ sys.exit(1)
+
+ print('IMPORTANT: The changes you are testing need to be locally committed')
+ print('because only the committed changes in the current branch will be')
+ print('copied to the docker environment or into subworkspaces.')
+
+ skipped_jobs = []
+
+ if args.filter_pr_tests:
+ print('Looking for irrelevant tests to skip...')
+ relevant_jobs = filter_tests(jobs, args.base_branch)
+ if len(relevant_jobs) == len(jobs):
+ print('No tests will be skipped.')
+ else:
+ print('These tests will be skipped:')
+ skipped_jobs = list(set(jobs) - set(relevant_jobs))
+ # Sort by shortnames to make printing of skipped tests consistent
+ skipped_jobs.sort(key=lambda job: job.shortname)
+ for job in skipped_jobs:
+ print(' %s' % job.shortname)
+ jobs = relevant_jobs
+
+ print('Will run these tests:')
+ for job in jobs:
+ print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+ print('')
+
+ if args.dry_run:
+ print('--dry_run was used, exiting')
+ sys.exit(1)
+
+ jobset.message('START', 'Running test matrix.', do_newline=True)
+ num_failures, resultset = jobset.run(jobs,
+ newline_on_success=True,
+ travis=True,
+ maxjobs=args.jobs)
+ # Merge skipped tests into results to show skipped tests on report.xml
+ if skipped_jobs:
+ ignored_num_skipped_failures, skipped_results = jobset.run(
+ skipped_jobs, skip_jobs=True)
+ resultset.update(skipped_results)
+ report_utils.render_junit_xml_report(resultset,
+ _report_filename(_MATRIX_REPORT_NAME),
+ suite_name=_MATRIX_REPORT_NAME,
+ multi_target=True)
+
+ if num_failures == 0:
+ jobset.message('SUCCESS',
+ 'All run_tests.py instances finished successfully.',
+ do_newline=True)
+ else:
+ jobset.message('FAILED',
+ 'Some run_tests.py instances have failed.',
+ do_newline=True)
+ sys.exit(1)
diff --git a/grpc/tools/run_tests/run_xds_tests.py b/grpc/tools/run_tests/run_xds_tests.py
new file mode 100755
index 00000000..8f1d9b15
--- /dev/null
+++ b/grpc/tools/run_tests/run_xds_tests.py
@@ -0,0 +1,1278 @@
+#!/usr/bin/env python
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Run xDS integration tests on GCP using Traffic Director."""
+
+import argparse
+import googleapiclient.discovery
+import grpc
+import logging
+import os
+import random
+import shlex
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+
+from oauth2client.client import GoogleCredentials
+
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+
+from src.proto.grpc.testing import messages_pb2
+from src.proto.grpc.testing import test_pb2_grpc
+
+logger = logging.getLogger()
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter(fmt='%(asctime)s: %(levelname)-8s %(message)s')
+console_handler.setFormatter(formatter)
+logger.handlers = []
+logger.addHandler(console_handler)
+logger.setLevel(logging.WARNING)
+
+_TEST_CASES = [
+ 'backends_restart',
+ 'change_backend_service',
+ 'new_instance_group_receives_traffic',
+ 'ping_pong',
+ 'remove_instance_group',
+ 'round_robin',
+ 'secondary_locality_gets_no_requests_on_partial_primary_failure',
+ 'secondary_locality_gets_requests_on_primary_failure',
+]
+
+
+def parse_test_cases(arg):
+ if arg == 'all':
+ return _TEST_CASES
+ if arg == '':
+ return []
+ test_cases = arg.split(',')
+ if all(test_case in _TEST_CASES for test_case in test_cases):
+ return test_cases
+ raise Exception('Failed to parse test cases %s' % arg)
+
+
+def parse_port_range(port_arg):
+ try:
+ port = int(port_arg)
+ return range(port, port + 1)
+ except ValueError:
+ port_min, port_max = port_arg.split(':')
+ return range(int(port_min), int(port_max) + 1)
+
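+# Illustrative only: parse_port_range('8080') returns range(8080, 8081), and
+# parse_port_range('8080:8110') returns range(8080, 8111); both ends of the
+# min:max form are inclusive.
+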
+
+argp = argparse.ArgumentParser(description='Run xDS interop tests on GCP')
+argp.add_argument('--project_id', help='GCP project id')
+argp.add_argument(
+ '--gcp_suffix',
+ default='',
+ help='Optional suffix for all generated GCP resource names. Useful to '
+ 'ensure distinct names across test runs.')
+argp.add_argument(
+ '--test_case',
+ default='ping_pong',
+ type=parse_test_cases,
+ help='Comma-separated list of test cases to run, or \'all\' to run every '
+ 'test. Available tests: %s' % ' '.join(_TEST_CASES))
+argp.add_argument(
+ '--bootstrap_file',
+ default='',
+ help='File to reference via GRPC_XDS_BOOTSTRAP. Disables built-in '
+ 'bootstrap generation')
+argp.add_argument(
+ '--client_cmd',
+ default=None,
+ help='Command to launch xDS test client. {server_uri}, {stats_port} and '
+ '{qps} references will be replaced using str.format(). GRPC_XDS_BOOTSTRAP '
+ 'will be set for the command')
+argp.add_argument('--zone', default='us-central1-a')
+argp.add_argument('--secondary_zone',
+ default='us-west1-b',
+ help='Zone to use for secondary TD locality tests')
+argp.add_argument('--qps', default=10, type=int, help='Client QPS')
+argp.add_argument(
+ '--wait_for_backend_sec',
+ default=1200,
+ type=int,
+ help='Time limit for waiting for created backend services to report '
+ 'healthy when launching or updating GCP resources')
+argp.add_argument(
+ '--use_existing_gcp_resources',
+ default=False,
+ action='store_true',
+ help=
+ 'If set, find and use already created GCP resources instead of creating new'
+ ' ones.')
+argp.add_argument(
+ '--keep_gcp_resources',
+ default=False,
+ action='store_true',
+ help=
+ 'Leave GCP VMs and configuration running after test. Default behavior is '
+ 'to delete when tests complete.')
+argp.add_argument(
+ '--compute_discovery_document',
+ default=None,
+ type=str,
+ help=
+ 'If provided, uses this file instead of retrieving via the GCP discovery '
+ 'API')
+argp.add_argument(
+ '--alpha_compute_discovery_document',
+ default=None,
+ type=str,
+ help='If provided, uses this file instead of retrieving via the alpha GCP '
+ 'discovery API')
+argp.add_argument('--network',
+ default='global/networks/default',
+ help='GCP network to use')
+argp.add_argument('--service_port_range',
+ default='8080:8110',
+ type=parse_port_range,
+ help='Listening port for created gRPC backends. Specified as '
+ 'either a single int or as a range in the format min:max, in '
+ 'which case an available port p will be chosen s.t. min <= p '
+ '<= max')
+argp.add_argument(
+ '--stats_port',
+ default=8079,
+ type=int,
+ help='Local port for the client process to expose the LB stats service')
+argp.add_argument('--xds_server',
+ default='trafficdirector.googleapis.com:443',
+ help='xDS server')
+argp.add_argument('--source_image',
+ default='projects/debian-cloud/global/images/family/debian-9',
+ help='Source image for VMs created during the test')
+argp.add_argument('--path_to_server_binary',
+ default=None,
+ type=str,
+ help='If set, the server binary must already be pre-built on '
+ 'the specified source image')
+argp.add_argument('--machine_type',
+ default='e2-standard-2',
+ help='Machine type for VMs created during the test')
+argp.add_argument(
+ '--instance_group_size',
+ default=2,
+ type=int,
+ help='Number of VMs to create per instance group. Certain test cases (e.g., '
+ 'round_robin) may not give meaningful results if this is set to a value '
+ 'less than 2.')
+argp.add_argument('--verbose',
+ help='verbose log output',
+ default=False,
+ action='store_true')
+# TODO(ericgribkoff) Remove this param once the sponge-formatted log files are
+# visible in all test environments.
+argp.add_argument('--log_client_output',
+ help='Log captured client output',
+ default=False,
+ action='store_true')
+argp.add_argument('--only_stable_gcp_apis',
+ help='Do not use alpha compute APIs',
+ default=False,
+ action='store_true')
+args = argp.parse_args()
+
+if args.verbose:
+ logger.setLevel(logging.DEBUG)
+
+_DEFAULT_SERVICE_PORT = 80
+_WAIT_FOR_BACKEND_SEC = args.wait_for_backend_sec
+_WAIT_FOR_OPERATION_SEC = 300
+_INSTANCE_GROUP_SIZE = args.instance_group_size
+_NUM_TEST_RPCS = 10 * args.qps
+_WAIT_FOR_STATS_SEC = 180
+_WAIT_FOR_URL_MAP_PATCH_SEC = 300
+_GCP_API_RETRIES = 5
+_BOOTSTRAP_TEMPLATE = """
+{{
+ "node": {{
+ "id": "{node_id}",
+ "metadata": {{
+ "TRAFFICDIRECTOR_NETWORK_NAME": "%s"
+ }},
+ "locality": {{
+ "zone": "%s"
+ }}
+ }},
+ "xds_servers": [{{
+ "server_uri": "%s",
+ "channel_creds": [
+ {{
+ "type": "google_default",
+ "config": {{}}
+ }}
+ ]
+ }}]
+}}""" % (args.network.split('/')[-1], args.zone, args.xds_server)
+_TESTS_USING_SECONDARY_IG = [
+ 'secondary_locality_gets_no_requests_on_partial_primary_failure',
+ 'secondary_locality_gets_requests_on_primary_failure'
+]
+_USE_SECONDARY_IG = any(
+ [t in args.test_case for t in _TESTS_USING_SECONDARY_IG])
+_PATH_MATCHER_NAME = 'path-matcher'
+_BASE_TEMPLATE_NAME = 'test-template'
+_BASE_INSTANCE_GROUP_NAME = 'test-ig'
+_BASE_HEALTH_CHECK_NAME = 'test-hc'
+_BASE_FIREWALL_RULE_NAME = 'test-fw-rule'
+_BASE_BACKEND_SERVICE_NAME = 'test-backend-service'
+_BASE_URL_MAP_NAME = 'test-map'
+_BASE_SERVICE_HOST = 'grpc-test'
+_BASE_TARGET_PROXY_NAME = 'test-target-proxy'
+_BASE_FORWARDING_RULE_NAME = 'test-forwarding-rule'
+_TEST_LOG_BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ '../../reports')
+_SPONGE_LOG_NAME = 'sponge_log.log'
+_SPONGE_XML_NAME = 'sponge_log.xml'
+
+
+def get_client_stats(num_rpcs, timeout_sec):
+ with grpc.insecure_channel('localhost:%d' % args.stats_port) as channel:
+ stub = test_pb2_grpc.LoadBalancerStatsServiceStub(channel)
+ request = messages_pb2.LoadBalancerStatsRequest()
+ request.num_rpcs = num_rpcs
+ request.timeout_sec = timeout_sec
+ rpc_timeout = timeout_sec * 2 # Allow time for connection establishment
+ try:
+ response = stub.GetClientStats(request,
+ wait_for_ready=True,
+ timeout=rpc_timeout)
+ logger.debug('Invoked GetClientStats RPC: %s', response)
+ return response
+ except grpc.RpcError:
+ logger.exception('GetClientStats RPC failed')
+ # Re-raise so callers fail loudly instead of receiving None.
+ raise
+
+
+def _verify_rpcs_to_given_backends(backends, timeout_sec, num_rpcs,
+ allow_failures):
+ start_time = time.time()
+ error_msg = None
+ logger.debug('Waiting for %d sec until backends %s receive load' %
+ (timeout_sec, backends))
+ while time.time() - start_time <= timeout_sec:
+ error_msg = None
+ stats = get_client_stats(num_rpcs, timeout_sec)
+ rpcs_by_peer = stats.rpcs_by_peer
+ for backend in backends:
+ if backend not in rpcs_by_peer:
+ error_msg = 'Backend %s did not receive load' % backend
+ break
+ if not error_msg and len(rpcs_by_peer) > len(backends):
+ error_msg = 'Unexpected backend received load: %s' % rpcs_by_peer
+ if not allow_failures and stats.num_failures > 0:
+ error_msg = '%d RPCs failed' % stats.num_failures
+ if not error_msg:
+ return
+ raise Exception(error_msg)
+
+
+def wait_until_all_rpcs_go_to_given_backends_or_fail(backends,
+ timeout_sec,
+ num_rpcs=100):
+ _verify_rpcs_to_given_backends(backends,
+ timeout_sec,
+ num_rpcs,
+ allow_failures=True)
+
+
+def wait_until_all_rpcs_go_to_given_backends(backends,
+ timeout_sec,
+ num_rpcs=100):
+ _verify_rpcs_to_given_backends(backends,
+ timeout_sec,
+ num_rpcs,
+ allow_failures=False)
+
+
+def test_backends_restart(gcp, backend_service, instance_group):
+ logger.info('Running test_backends_restart')
+ instance_names = get_instance_names(gcp, instance_group)
+ num_instances = len(instance_names)
+ start_time = time.time()
+ wait_until_all_rpcs_go_to_given_backends(instance_names,
+ _WAIT_FOR_STATS_SEC)
+ stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
+ try:
+ resize_instance_group(gcp, instance_group, 0)
+ wait_until_all_rpcs_go_to_given_backends_or_fail([],
+ _WAIT_FOR_BACKEND_SEC)
+ finally:
+ resize_instance_group(gcp, instance_group, num_instances)
+ wait_for_healthy_backends(gcp, backend_service, instance_group)
+ new_instance_names = get_instance_names(gcp, instance_group)
+ wait_until_all_rpcs_go_to_given_backends(new_instance_names,
+ _WAIT_FOR_BACKEND_SEC)
+ new_stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
+ original_distribution = list(stats.rpcs_by_peer.values())
+ original_distribution.sort()
+ new_distribution = list(new_stats.rpcs_by_peer.values())
+ new_distribution.sort()
+ threshold = 3
+ for i in range(len(original_distribution)):
+ if abs(original_distribution[i] - new_distribution[i]) > threshold:
+ raise Exception('Distributions do not match: ', stats, new_stats)
+
+
+def test_change_backend_service(gcp, original_backend_service, instance_group,
+ alternate_backend_service,
+ same_zone_instance_group):
+ logger.info('Running test_change_backend_service')
+ original_backend_instances = get_instance_names(gcp, instance_group)
+ alternate_backend_instances = get_instance_names(gcp,
+ same_zone_instance_group)
+ patch_backend_instances(gcp, alternate_backend_service,
+ [same_zone_instance_group])
+ wait_for_healthy_backends(gcp, original_backend_service, instance_group)
+ wait_for_healthy_backends(gcp, alternate_backend_service,
+ same_zone_instance_group)
+ wait_until_all_rpcs_go_to_given_backends(original_backend_instances,
+ _WAIT_FOR_STATS_SEC)
+ try:
+ patch_url_map_backend_service(gcp, alternate_backend_service)
+ # TODO(ericgribkoff) Verify no RPCs fail during backend switch.
+ # Currently TD may briefly send an update with no localities if adding
+ # the MIG to the backend service above races with the URL map patch.
+ wait_until_all_rpcs_go_to_given_backends(alternate_backend_instances,
+ _WAIT_FOR_URL_MAP_PATCH_SEC)
+ finally:
+ patch_url_map_backend_service(gcp, original_backend_service)
+ patch_backend_instances(gcp, alternate_backend_service, [])
+
+
+def test_new_instance_group_receives_traffic(gcp, backend_service,
+ instance_group,
+ same_zone_instance_group):
+ logger.info('Running test_new_instance_group_receives_traffic')
+ instance_names = get_instance_names(gcp, instance_group)
+ # TODO(ericgribkoff) Reduce this timeout. When running sequentially, this
+ # occurs after patching the url map in test_change_backend_service, so we
+ # need the extended timeout here as well.
+ wait_until_all_rpcs_go_to_given_backends(instance_names,
+ _WAIT_FOR_URL_MAP_PATCH_SEC)
+ try:
+ patch_backend_instances(gcp,
+ backend_service,
+ [instance_group, same_zone_instance_group],
+ balancing_mode='RATE')
+ wait_for_healthy_backends(gcp, backend_service, instance_group)
+ wait_for_healthy_backends(gcp, backend_service,
+ same_zone_instance_group)
+ combined_instance_names = instance_names + get_instance_names(
+ gcp, same_zone_instance_group)
+ wait_until_all_rpcs_go_to_given_backends(combined_instance_names,
+ _WAIT_FOR_BACKEND_SEC)
+ finally:
+ patch_backend_instances(gcp, backend_service, [instance_group])
+
+
+def test_ping_pong(gcp, backend_service, instance_group):
+ logger.info('Running test_ping_pong')
+ wait_for_healthy_backends(gcp, backend_service, instance_group)
+ instance_names = get_instance_names(gcp, instance_group)
+ wait_until_all_rpcs_go_to_given_backends(instance_names,
+ _WAIT_FOR_STATS_SEC)
+
+
+def test_remove_instance_group(gcp, backend_service, instance_group,
+ same_zone_instance_group):
+ logger.info('Running test_remove_instance_group')
+ try:
+ patch_backend_instances(gcp,
+ backend_service,
+ [instance_group, same_zone_instance_group],
+ balancing_mode='RATE')
+ wait_for_healthy_backends(gcp, backend_service, instance_group)
+ wait_for_healthy_backends(gcp, backend_service,
+ same_zone_instance_group)
+ instance_names = get_instance_names(gcp, instance_group)
+ same_zone_instance_names = get_instance_names(gcp,
+ same_zone_instance_group)
+ wait_until_all_rpcs_go_to_given_backends(
+ instance_names + same_zone_instance_names, _WAIT_FOR_BACKEND_SEC)
+ patch_backend_instances(gcp,
+ backend_service, [same_zone_instance_group],
+ balancing_mode='RATE')
+ wait_until_all_rpcs_go_to_given_backends(same_zone_instance_names,
+ _WAIT_FOR_BACKEND_SEC)
+ finally:
+ patch_backend_instances(gcp, backend_service, [instance_group])
+ wait_until_all_rpcs_go_to_given_backends(instance_names,
+ _WAIT_FOR_BACKEND_SEC)
+
+
+def test_round_robin(gcp, backend_service, instance_group):
+ logger.info('Running test_round_robin')
+ wait_for_healthy_backends(gcp, backend_service, instance_group)
+ instance_names = get_instance_names(gcp, instance_group)
+ threshold = 1
+ wait_until_all_rpcs_go_to_given_backends(instance_names,
+ _WAIT_FOR_STATS_SEC)
+ stats = get_client_stats(_NUM_TEST_RPCS, _WAIT_FOR_STATS_SEC)
+ requests_received = list(stats.rpcs_by_peer.values())
+ total_requests_received = sum(requests_received)
+ if total_requests_received != _NUM_TEST_RPCS:
+ raise Exception('Unexpected RPC failures', stats)
+ expected_requests = total_requests_received / len(instance_names)
+ for instance in instance_names:
+ if abs(stats.rpcs_by_peer[instance] - expected_requests) > threshold:
+ raise Exception(
+ 'RPC peer distribution differs from expected by more than %d '
+ 'for instance %s (%s)' % (threshold, instance, stats))
+
+
+def test_secondary_locality_gets_no_requests_on_partial_primary_failure(
+ gcp, backend_service, primary_instance_group,
+ secondary_zone_instance_group):
+ logger.info(
+ 'Running test_secondary_locality_gets_no_requests_on_partial_primary_failure'
+ )
+ try:
+ patch_backend_instances(
+ gcp, backend_service,
+ [primary_instance_group, secondary_zone_instance_group])
+ wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
+ wait_for_healthy_backends(gcp, backend_service,
+ secondary_zone_instance_group)
+ primary_instance_names = get_instance_names(gcp, primary_instance_group)
+ secondary_instance_names = get_instance_names(
+ gcp, secondary_zone_instance_group)
+ wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
+ _WAIT_FOR_STATS_SEC)
+ original_size = len(primary_instance_names)
+ resize_instance_group(gcp, primary_instance_group, original_size - 1)
+ remaining_instance_names = get_instance_names(gcp,
+ primary_instance_group)
+ wait_until_all_rpcs_go_to_given_backends(remaining_instance_names,
+ _WAIT_FOR_BACKEND_SEC)
+ finally:
+ patch_backend_instances(gcp, backend_service, [primary_instance_group])
+ resize_instance_group(gcp, primary_instance_group, original_size)
+
+
+def test_secondary_locality_gets_requests_on_primary_failure(
+ gcp, backend_service, primary_instance_group,
+ secondary_zone_instance_group):
+ logger.info(
+ 'Running test_secondary_locality_gets_requests_on_primary_failure')
+ try:
+ patch_backend_instances(
+ gcp, backend_service,
+ [primary_instance_group, secondary_zone_instance_group])
+ wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
+ wait_for_healthy_backends(gcp, backend_service,
+ secondary_zone_instance_group)
+ primary_instance_names = get_instance_names(gcp, primary_instance_group)
+ secondary_instance_names = get_instance_names(
+ gcp, secondary_zone_instance_group)
+ wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
+ _WAIT_FOR_BACKEND_SEC)
+ original_size = len(primary_instance_names)
+ resize_instance_group(gcp, primary_instance_group, 0)
+ wait_until_all_rpcs_go_to_given_backends(secondary_instance_names,
+ _WAIT_FOR_BACKEND_SEC)
+
+ resize_instance_group(gcp, primary_instance_group, original_size)
+ new_instance_names = get_instance_names(gcp, primary_instance_group)
+ wait_for_healthy_backends(gcp, backend_service, primary_instance_group)
+ wait_until_all_rpcs_go_to_given_backends(new_instance_names,
+ _WAIT_FOR_BACKEND_SEC)
+ finally:
+ patch_backend_instances(gcp, backend_service, [primary_instance_group])
+
+
+def get_startup_script(path_to_server_binary, service_port):
+ if path_to_server_binary:
+ return "nohup %s --port=%d 1>/dev/null &" % (path_to_server_binary,
+ service_port)
+ else:
+ return """#!/bin/bash
+sudo apt update
+sudo apt install -y git default-jdk
+mkdir java_server
+pushd java_server
+git clone https://github.com/grpc/grpc-java.git
+pushd grpc-java
+pushd interop-testing
+../gradlew installDist -x test -PskipCodegen=true -PskipAndroid=true
+
+nohup build/install/grpc-interop-testing/bin/xds-test-server \
+ --port=%d 1>/dev/null &""" % service_port
+
+
+def create_instance_template(gcp, name, network, source_image, machine_type,
+ startup_script):
+ config = {
+ 'name': name,
+ 'properties': {
+ 'tags': {
+ 'items': ['allow-health-checks']
+ },
+ 'machineType': machine_type,
+ 'serviceAccounts': [{
+ 'email': 'default',
+ 'scopes': ['https://www.googleapis.com/auth/cloud-platform',]
+ }],
+ 'networkInterfaces': [{
+ 'accessConfigs': [{
+ 'type': 'ONE_TO_ONE_NAT'
+ }],
+ 'network': network
+ }],
+ 'disks': [{
+ 'boot': True,
+ 'initializeParams': {
+ 'sourceImage': source_image
+ }
+ }],
+ 'metadata': {
+ 'items': [{
+ 'key': 'startup-script',
+ 'value': startup_script
+ }]
+ }
+ }
+ }
+
+ logger.debug('Sending GCP request with body=%s', config)
+ result = gcp.compute.instanceTemplates().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ gcp.instance_template = GcpResource(config['name'], result['targetLink'])
+
+
+def add_instance_group(gcp, zone, name, size):
+ config = {
+ 'name': name,
+ 'instanceTemplate': gcp.instance_template.url,
+ 'targetSize': size,
+ 'namedPorts': [{
+ 'name': 'grpc',
+ 'port': gcp.service_port
+ }]
+ }
+
+ logger.debug('Sending GCP request with body=%s', config)
+ result = gcp.compute.instanceGroupManagers().insert(
+ project=gcp.project, zone=zone,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_zone_operation(gcp, zone, result['name'])
+ result = gcp.compute.instanceGroupManagers().get(
+ project=gcp.project, zone=zone,
+ instanceGroupManager=config['name']).execute(
+ num_retries=_GCP_API_RETRIES)
+ instance_group = InstanceGroup(config['name'], result['instanceGroup'],
+ zone)
+ gcp.instance_groups.append(instance_group)
+ return instance_group
+
+
+def create_health_check(gcp, name):
+ if gcp.alpha_compute:
+ config = {
+ 'name': name,
+ 'type': 'GRPC',
+ 'grpcHealthCheck': {
+ 'portSpecification': 'USE_SERVING_PORT'
+ }
+ }
+ compute_to_use = gcp.alpha_compute
+ else:
+ config = {
+ 'name': name,
+ 'type': 'TCP',
+ 'tcpHealthCheck': {
+ 'portName': 'grpc'
+ }
+ }
+ compute_to_use = gcp.compute
+ logger.debug('Sending GCP request with body=%s', config)
+ result = compute_to_use.healthChecks().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ gcp.health_check = GcpResource(config['name'], result['targetLink'])
+
+
+def create_health_check_firewall_rule(gcp, name):
+ config = {
+ 'name': name,
+ 'direction': 'INGRESS',
+ 'allowed': [{
+ 'IPProtocol': 'tcp'
+ }],
+ 'sourceRanges': ['35.191.0.0/16', '130.211.0.0/22'],
+ 'targetTags': ['allow-health-checks'],
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = gcp.compute.firewalls().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ gcp.health_check_firewall_rule = GcpResource(config['name'],
+ result['targetLink'])
+
+
+def add_backend_service(gcp, name):
+ if gcp.alpha_compute:
+ protocol = 'GRPC'
+ compute_to_use = gcp.alpha_compute
+ else:
+ protocol = 'HTTP2'
+ compute_to_use = gcp.compute
+ config = {
+ 'name': name,
+ 'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
+ 'healthChecks': [gcp.health_check.url],
+ 'portName': 'grpc',
+ 'protocol': protocol
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = compute_to_use.backendServices().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ backend_service = GcpResource(config['name'], result['targetLink'])
+ gcp.backend_services.append(backend_service)
+ return backend_service
+
+
+def create_url_map(gcp, name, backend_service, host_name):
+ config = {
+ 'name': name,
+ 'defaultService': backend_service.url,
+ 'pathMatchers': [{
+ 'name': _PATH_MATCHER_NAME,
+ 'defaultService': backend_service.url,
+ }],
+ 'hostRules': [{
+ 'hosts': [host_name],
+ 'pathMatcher': _PATH_MATCHER_NAME
+ }]
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = gcp.compute.urlMaps().insert(
+ project=gcp.project, body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ gcp.url_map = GcpResource(config['name'], result['targetLink'])
+
+
+def patch_url_map_host_rule_with_port(gcp, name, backend_service, host_name):
+ config = {
+ 'hostRules': [{
+ 'hosts': ['%s:%d' % (host_name, gcp.service_port)],
+ 'pathMatcher': _PATH_MATCHER_NAME
+ }]
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = gcp.compute.urlMaps().patch(
+ project=gcp.project, urlMap=name,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+
+
+def create_target_proxy(gcp, name):
+ if gcp.alpha_compute:
+ config = {
+ 'name': name,
+ 'url_map': gcp.url_map.url,
+ 'validate_for_proxyless': True,
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = gcp.alpha_compute.targetGrpcProxies().insert(
+ project=gcp.project,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
+ else:
+ config = {
+ 'name': name,
+ 'url_map': gcp.url_map.url,
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = gcp.compute.targetHttpProxies().insert(
+ project=gcp.project,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ gcp.target_proxy = GcpResource(config['name'], result['targetLink'])
+
+
+def create_global_forwarding_rule(gcp, name, potential_ports):
+ if gcp.alpha_compute:
+ compute_to_use = gcp.alpha_compute
+ else:
+ compute_to_use = gcp.compute
+ for port in potential_ports:
+ try:
+ config = {
+ 'name': name,
+ 'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
+ 'portRange': str(port),
+ 'IPAddress': '0.0.0.0',
+ 'network': args.network,
+ 'target': gcp.target_proxy.url,
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = compute_to_use.globalForwardingRules().insert(
+ project=gcp.project,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ gcp.global_forwarding_rule = GcpResource(config['name'],
+ result['targetLink'])
+ gcp.service_port = port
+ return
+ except googleapiclient.errors.HttpError as http_error:
+ logger.warning(
+ 'Got error %s when attempting to create forwarding rule to '
+ '0.0.0.0:%d. Retrying with another port.' % (http_error, port))
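+ # Note: if every port in potential_ports is rejected, the loop falls
+ # through without setting gcp.service_port or raising, so callers will
+ # fail later; the wide default range (8080:8110) makes this unlikely.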
+
+
+def get_health_check(gcp, health_check_name):
+ result = gcp.compute.healthChecks().get(
+ project=gcp.project, healthCheck=health_check_name).execute()
+ gcp.health_check = GcpResource(health_check_name, result['selfLink'])
+
+
+def get_health_check_firewall_rule(gcp, firewall_name):
+ result = gcp.compute.firewalls().get(project=gcp.project,
+ firewall=firewall_name).execute()
+ gcp.health_check_firewall_rule = GcpResource(firewall_name,
+ result['selfLink'])
+
+
+def get_backend_service(gcp, backend_service_name):
+ result = gcp.compute.backendServices().get(
+ project=gcp.project, backendService=backend_service_name).execute()
+ backend_service = GcpResource(backend_service_name, result['selfLink'])
+ gcp.backend_services.append(backend_service)
+ return backend_service
+
+
+def get_url_map(gcp, url_map_name):
+ result = gcp.compute.urlMaps().get(project=gcp.project,
+ urlMap=url_map_name).execute()
+ gcp.url_map = GcpResource(url_map_name, result['selfLink'])
+
+
+def get_target_proxy(gcp, target_proxy_name):
+ if gcp.alpha_compute:
+ result = gcp.alpha_compute.targetGrpcProxies().get(
+ project=gcp.project, targetGrpcProxy=target_proxy_name).execute()
+ else:
+ result = gcp.compute.targetHttpProxies().get(
+ project=gcp.project, targetHttpProxy=target_proxy_name).execute()
+ gcp.target_proxy = GcpResource(target_proxy_name, result['selfLink'])
+
+
+def get_global_forwarding_rule(gcp, forwarding_rule_name):
+ result = gcp.compute.globalForwardingRules().get(
+ project=gcp.project, forwardingRule=forwarding_rule_name).execute()
+ gcp.global_forwarding_rule = GcpResource(forwarding_rule_name,
+ result['selfLink'])
+
+
+def get_instance_template(gcp, template_name):
+ result = gcp.compute.instanceTemplates().get(
+ project=gcp.project, instanceTemplate=template_name).execute()
+ gcp.instance_template = GcpResource(template_name, result['selfLink'])
+
+
+def get_instance_group(gcp, zone, instance_group_name):
+ result = gcp.compute.instanceGroups().get(
+ project=gcp.project, zone=zone,
+ instanceGroup=instance_group_name).execute()
+ gcp.service_port = result['namedPorts'][0]['port']
+ instance_group = InstanceGroup(instance_group_name, result['selfLink'],
+ zone)
+ gcp.instance_groups.append(instance_group)
+ return instance_group
+
+
+def delete_global_forwarding_rule(gcp):
+ try:
+ result = gcp.compute.globalForwardingRules().delete(
+ project=gcp.project,
+ forwardingRule=gcp.global_forwarding_rule.name).execute(
+ num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ except googleapiclient.errors.HttpError as http_error:
+ logger.info('Delete failed: %s', http_error)
+
+
+def delete_target_proxy(gcp):
+ try:
+ if gcp.alpha_compute:
+ result = gcp.alpha_compute.targetGrpcProxies().delete(
+ project=gcp.project,
+ targetGrpcProxy=gcp.target_proxy.name).execute(
+ num_retries=_GCP_API_RETRIES)
+ else:
+ result = gcp.compute.targetHttpProxies().delete(
+ project=gcp.project,
+ targetHttpProxy=gcp.target_proxy.name).execute(
+ num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ except googleapiclient.errors.HttpError as http_error:
+ logger.info('Delete failed: %s', http_error)
+
+
+def delete_url_map(gcp):
+ try:
+ result = gcp.compute.urlMaps().delete(
+ project=gcp.project,
+ urlMap=gcp.url_map.name).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ except googleapiclient.errors.HttpError as http_error:
+ logger.info('Delete failed: %s', http_error)
+
+
+def delete_backend_services(gcp):
+ for backend_service in gcp.backend_services:
+ try:
+ result = gcp.compute.backendServices().delete(
+ project=gcp.project,
+ backendService=backend_service.name).execute(
+ num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ except googleapiclient.errors.HttpError as http_error:
+ logger.info('Delete failed: %s', http_error)
+
+
+def delete_firewall(gcp):
+ try:
+ result = gcp.compute.firewalls().delete(
+ project=gcp.project,
+ firewall=gcp.health_check_firewall_rule.name).execute(
+ num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ except googleapiclient.errors.HttpError as http_error:
+ logger.info('Delete failed: %s', http_error)
+
+
+def delete_health_check(gcp):
+ try:
+ result = gcp.compute.healthChecks().delete(
+ project=gcp.project, healthCheck=gcp.health_check.name).execute(
+ num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ except googleapiclient.errors.HttpError as http_error:
+ logger.info('Delete failed: %s', http_error)
+
+
+def delete_instance_groups(gcp):
+ for instance_group in gcp.instance_groups:
+ try:
+ result = gcp.compute.instanceGroupManagers().delete(
+ project=gcp.project,
+ zone=instance_group.zone,
+ instanceGroupManager=instance_group.name).execute(
+ num_retries=_GCP_API_RETRIES)
+ wait_for_zone_operation(gcp,
+ instance_group.zone,
+ result['name'],
+ timeout_sec=_WAIT_FOR_BACKEND_SEC)
+ except googleapiclient.errors.HttpError as http_error:
+ logger.info('Delete failed: %s', http_error)
+
+
+def delete_instance_template(gcp):
+ try:
+ result = gcp.compute.instanceTemplates().delete(
+ project=gcp.project,
+ instanceTemplate=gcp.instance_template.name).execute(
+ num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+ except googleapiclient.errors.HttpError as http_error:
+ logger.info('Delete failed: %s', http_error)
+
+
+def patch_backend_instances(gcp,
+ backend_service,
+ instance_groups,
+ balancing_mode='UTILIZATION'):
+ if gcp.alpha_compute:
+ compute_to_use = gcp.alpha_compute
+ else:
+ compute_to_use = gcp.compute
+ config = {
+ 'backends': [{
+ 'group': instance_group.url,
+ 'balancingMode': balancing_mode,
+ 'maxRate': 1 if balancing_mode == 'RATE' else None
+ } for instance_group in instance_groups],
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = compute_to_use.backendServices().patch(
+ project=gcp.project, backendService=backend_service.name,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp,
+ result['name'],
+ timeout_sec=_WAIT_FOR_BACKEND_SEC)
+
+
+def resize_instance_group(gcp,
+ instance_group,
+ new_size,
+ timeout_sec=_WAIT_FOR_OPERATION_SEC):
+ result = gcp.compute.instanceGroupManagers().resize(
+ project=gcp.project,
+ zone=instance_group.zone,
+ instanceGroupManager=instance_group.name,
+ size=new_size).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_zone_operation(gcp,
+ instance_group.zone,
+ result['name'],
+ timeout_sec=360)
+ start_time = time.time()
+ while True:
+ current_size = len(get_instance_names(gcp, instance_group))
+ if current_size == new_size:
+ break
+ if time.time() - start_time > timeout_sec:
+ raise Exception('Failed to resize instance group %s' %
+ instance_group.name)
+ time.sleep(2)
+
+
+def patch_url_map_backend_service(gcp, backend_service):
+ config = {
+ 'defaultService':
+ backend_service.url,
+ 'pathMatchers': [{
+ 'name': _PATH_MATCHER_NAME,
+ 'defaultService': backend_service.url,
+ }]
+ }
+ logger.debug('Sending GCP request with body=%s', config)
+ result = gcp.compute.urlMaps().patch(
+ project=gcp.project, urlMap=gcp.url_map.name,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
+ wait_for_global_operation(gcp, result['name'])
+
+
+def wait_for_global_operation(gcp,
+ operation,
+ timeout_sec=_WAIT_FOR_OPERATION_SEC):
+ start_time = time.time()
+ while time.time() - start_time <= timeout_sec:
+ result = gcp.compute.globalOperations().get(
+ project=gcp.project,
+ operation=operation).execute(num_retries=_GCP_API_RETRIES)
+ if result['status'] == 'DONE':
+ if 'error' in result:
+ raise Exception(result['error'])
+ return
+ time.sleep(2)
+ raise Exception('Operation %s did not complete within %d seconds' %
+ (operation, timeout_sec))
+
+
+def wait_for_zone_operation(gcp,
+ zone,
+ operation,
+ timeout_sec=_WAIT_FOR_OPERATION_SEC):
+ start_time = time.time()
+ while time.time() - start_time <= timeout_sec:
+ result = gcp.compute.zoneOperations().get(
+ project=gcp.project, zone=zone,
+ operation=operation).execute(num_retries=_GCP_API_RETRIES)
+ if result['status'] == 'DONE':
+ if 'error' in result:
+ raise Exception(result['error'])
+ return
+ time.sleep(2)
+ raise Exception('Operation %s did not complete within %d seconds' %
+ (operation, timeout_sec))
+
+
+def wait_for_healthy_backends(gcp,
+ backend_service,
+ instance_group,
+ timeout_sec=_WAIT_FOR_BACKEND_SEC):
+ start_time = time.time()
+ config = {'group': instance_group.url}
+ while time.time() - start_time <= timeout_sec:
+ result = gcp.compute.backendServices().getHealth(
+ project=gcp.project,
+ backendService=backend_service.name,
+ body=config).execute(num_retries=_GCP_API_RETRIES)
+ if 'healthStatus' in result:
+ healthy = True
+ for instance in result['healthStatus']:
+ if instance['healthState'] != 'HEALTHY':
+ healthy = False
+ break
+ if healthy:
+ return
+ time.sleep(2)
+ raise Exception('Not all backends became healthy within %d seconds: %s' %
+ (timeout_sec, result))
+
+
+def get_instance_names(gcp, instance_group):
+ instance_names = []
+ result = gcp.compute.instanceGroups().listInstances(
+ project=gcp.project,
+ zone=instance_group.zone,
+ instanceGroup=instance_group.name,
+ body={
+ 'instanceState': 'ALL'
+ }).execute(num_retries=_GCP_API_RETRIES)
+ if 'items' not in result:
+ return []
+ for item in result['items']:
+ # listInstances() returns the full URL of the instance, which ends with
+ # the instance name. compute.instances().get() requires using the
+ # instance name (not the full URL) to look up instance details, so we
+ # just extract the name manually.
+ instance_name = item['instance'].split('/')[-1]
+ instance_names.append(instance_name)
+ return instance_names
+
+
+def clean_up(gcp):
+ if gcp.global_forwarding_rule:
+ delete_global_forwarding_rule(gcp)
+ if gcp.target_proxy:
+ delete_target_proxy(gcp)
+ if gcp.url_map:
+ delete_url_map(gcp)
+ delete_backend_services(gcp)
+ if gcp.health_check_firewall_rule:
+ delete_firewall(gcp)
+ if gcp.health_check:
+ delete_health_check(gcp)
+ delete_instance_groups(gcp)
+ if gcp.instance_template:
+ delete_instance_template(gcp)
+
+
+class InstanceGroup(object):
+
+ def __init__(self, name, url, zone):
+ self.name = name
+ self.url = url
+ self.zone = zone
+
+
+class GcpResource(object):
+
+ def __init__(self, name, url):
+ self.name = name
+ self.url = url
+
+
+class GcpState(object):
+
+ def __init__(self, compute, alpha_compute, project):
+ self.compute = compute
+ self.alpha_compute = alpha_compute
+ self.project = project
+ self.health_check = None
+ self.health_check_firewall_rule = None
+ self.backend_services = []
+ self.url_map = None
+ self.target_proxy = None
+ self.global_forwarding_rule = None
+ self.service_port = None
+ self.instance_template = None
+ self.instance_groups = []
+
+
+alpha_compute = None
+if args.compute_discovery_document:
+ with open(args.compute_discovery_document, 'r') as discovery_doc:
+ compute = googleapiclient.discovery.build_from_document(
+ discovery_doc.read())
+ if not args.only_stable_gcp_apis and args.alpha_compute_discovery_document:
+ with open(args.alpha_compute_discovery_document, 'r') as discovery_doc:
+ alpha_compute = googleapiclient.discovery.build_from_document(
+ discovery_doc.read())
+else:
+ compute = googleapiclient.discovery.build('compute', 'v1')
+ if not args.only_stable_gcp_apis:
+ alpha_compute = googleapiclient.discovery.build('compute', 'alpha')
+
+try:
+ gcp = GcpState(compute, alpha_compute, args.project_id)
+ health_check_name = _BASE_HEALTH_CHECK_NAME + args.gcp_suffix
+ firewall_name = _BASE_FIREWALL_RULE_NAME + args.gcp_suffix
+ backend_service_name = _BASE_BACKEND_SERVICE_NAME + args.gcp_suffix
+ alternate_backend_service_name = _BASE_BACKEND_SERVICE_NAME + '-alternate' + args.gcp_suffix
+ url_map_name = _BASE_URL_MAP_NAME + args.gcp_suffix
+ service_host_name = _BASE_SERVICE_HOST + args.gcp_suffix
+ target_proxy_name = _BASE_TARGET_PROXY_NAME + args.gcp_suffix
+ forwarding_rule_name = _BASE_FORWARDING_RULE_NAME + args.gcp_suffix
+ template_name = _BASE_TEMPLATE_NAME + args.gcp_suffix
+ instance_group_name = _BASE_INSTANCE_GROUP_NAME + args.gcp_suffix
+ same_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-same-zone' + args.gcp_suffix
+ if _USE_SECONDARY_IG:
+ secondary_zone_instance_group_name = _BASE_INSTANCE_GROUP_NAME + '-secondary-zone' + args.gcp_suffix
+ if args.use_existing_gcp_resources:
+ logger.info('Reusing existing GCP resources')
+ get_health_check(gcp, health_check_name)
+ try:
+ get_health_check_firewall_rule(gcp, firewall_name)
+ except googleapiclient.errors.HttpError as http_error:
+ # Firewall rule may be auto-deleted periodically depending on GCP
+ # project settings.
+ logger.exception('Failed to find firewall rule, recreating')
+ create_health_check_firewall_rule(gcp, firewall_name)
+ backend_service = get_backend_service(gcp, backend_service_name)
+ alternate_backend_service = get_backend_service(
+ gcp, alternate_backend_service_name)
+ get_url_map(gcp, url_map_name)
+ get_target_proxy(gcp, target_proxy_name)
+ get_global_forwarding_rule(gcp, forwarding_rule_name)
+ get_instance_template(gcp, template_name)
+ instance_group = get_instance_group(gcp, args.zone, instance_group_name)
+ same_zone_instance_group = get_instance_group(
+ gcp, args.zone, same_zone_instance_group_name)
+ if _USE_SECONDARY_IG:
+ secondary_zone_instance_group = get_instance_group(
+ gcp, args.secondary_zone, secondary_zone_instance_group_name)
+ else:
+ create_health_check(gcp, health_check_name)
+ create_health_check_firewall_rule(gcp, firewall_name)
+ backend_service = add_backend_service(gcp, backend_service_name)
+ alternate_backend_service = add_backend_service(
+ gcp, alternate_backend_service_name)
+ create_url_map(gcp, url_map_name, backend_service, service_host_name)
+ create_target_proxy(gcp, target_proxy_name)
+ potential_service_ports = list(args.service_port_range)
+ random.shuffle(potential_service_ports)
+ create_global_forwarding_rule(gcp, forwarding_rule_name,
+ potential_service_ports)
+ if not gcp.service_port:
+ raise Exception(
+ 'Failed to find a valid ip:port for the forwarding rule')
+ if gcp.service_port != _DEFAULT_SERVICE_PORT:
+ patch_url_map_host_rule_with_port(gcp, url_map_name,
+ backend_service,
+ service_host_name)
+ startup_script = get_startup_script(args.path_to_server_binary,
+ gcp.service_port)
+ create_instance_template(gcp, template_name, args.network,
+ args.source_image, args.machine_type,
+ startup_script)
+ instance_group = add_instance_group(gcp, args.zone, instance_group_name,
+ _INSTANCE_GROUP_SIZE)
+ patch_backend_instances(gcp, backend_service, [instance_group])
+ same_zone_instance_group = add_instance_group(
+ gcp, args.zone, same_zone_instance_group_name, _INSTANCE_GROUP_SIZE)
+ if _USE_SECONDARY_IG:
+ secondary_zone_instance_group = add_instance_group(
+ gcp, args.secondary_zone, secondary_zone_instance_group_name,
+ _INSTANCE_GROUP_SIZE)
+
+ wait_for_healthy_backends(gcp, backend_service, instance_group)
+
+ if args.test_case:
+
+ if gcp.service_port == _DEFAULT_SERVICE_PORT:
+ server_uri = service_host_name
+ else:
+ server_uri = service_host_name + ':' + str(gcp.service_port)
+ if args.bootstrap_file:
+ bootstrap_path = os.path.abspath(args.bootstrap_file)
+ else:
+ with tempfile.NamedTemporaryFile(delete=False) as bootstrap_file:
+ bootstrap_file.write(
+ _BOOTSTRAP_TEMPLATE.format(
+ node_id=socket.gethostname()).encode('utf-8'))
+ bootstrap_path = bootstrap_file.name
+ client_env = dict(os.environ, GRPC_XDS_BOOTSTRAP=bootstrap_path)
+ client_cmd = shlex.split(
+ args.client_cmd.format(server_uri=server_uri,
+ stats_port=args.stats_port,
+ qps=args.qps))
+
+ test_results = {}
+ failed_tests = []
+ for test_case in args.test_case:
+ result = jobset.JobResult()
+ log_dir = os.path.join(_TEST_LOG_BASE_DIR, test_case)
+ if not os.path.exists(log_dir):
+ os.makedirs(log_dir)
+ test_log_filename = os.path.join(log_dir, _SPONGE_LOG_NAME)
+ test_log_file = open(test_log_filename, 'w+')
+ client_process = None
+ try:
+ client_process = subprocess.Popen(client_cmd,
+ env=client_env,
+ stderr=subprocess.STDOUT,
+ stdout=test_log_file)
+ if test_case == 'backends_restart':
+ test_backends_restart(gcp, backend_service, instance_group)
+ elif test_case == 'change_backend_service':
+ test_change_backend_service(gcp, backend_service,
+ instance_group,
+ alternate_backend_service,
+ same_zone_instance_group)
+ elif test_case == 'new_instance_group_receives_traffic':
+ test_new_instance_group_receives_traffic(
+ gcp, backend_service, instance_group,
+ same_zone_instance_group)
+ elif test_case == 'ping_pong':
+ test_ping_pong(gcp, backend_service, instance_group)
+ elif test_case == 'remove_instance_group':
+ test_remove_instance_group(gcp, backend_service,
+ instance_group,
+ same_zone_instance_group)
+ elif test_case == 'round_robin':
+ test_round_robin(gcp, backend_service, instance_group)
+ elif test_case == 'secondary_locality_gets_no_requests_on_partial_primary_failure':
+ test_secondary_locality_gets_no_requests_on_partial_primary_failure(
+ gcp, backend_service, instance_group,
+ secondary_zone_instance_group)
+ elif test_case == 'secondary_locality_gets_requests_on_primary_failure':
+ test_secondary_locality_gets_requests_on_primary_failure(
+ gcp, backend_service, instance_group,
+ secondary_zone_instance_group)
+ else:
+ logger.error('Unknown test case: %s', test_case)
+ sys.exit(1)
+ result.state = 'PASSED'
+ result.returncode = 0
+ except Exception as e:
+ logger.exception('Test case %s failed', test_case)
+ failed_tests.append(test_case)
+ result.state = 'FAILED'
+ result.message = str(e)
+ finally:
+ if client_process:
+ client_process.terminate()
+ test_log_file.close()
+ # Workaround for Python 3, as report_utils will invoke decode() on
+ # result.message, which has a default value of ''.
+ result.message = result.message.encode('UTF-8')
+ test_results[test_case] = [result]
+ if args.log_client_output:
+ logger.info('Client output:')
+ with open(test_log_filename, 'r') as client_output:
+ logger.info(client_output.read())
+ if not os.path.exists(_TEST_LOG_BASE_DIR):
+ os.makedirs(_TEST_LOG_BASE_DIR)
+ report_utils.render_junit_xml_report(test_results,
+ os.path.join(
+ _TEST_LOG_BASE_DIR,
+ _SPONGE_XML_NAME),
+ suite_name='xds_tests',
+ multi_target=True)
+ if failed_tests:
+ logger.error('Test case(s) %s failed', failed_tests)
+ sys.exit(1)
+finally:
+ if not args.keep_gcp_resources:
+ logger.info('Cleaning up GCP resources. This may take some time.')
+ clean_up(gcp)
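The driver above repeats one idiom throughout: issue a GCP mutation, then poll the returned operation until its status is DONE or a deadline passes. A minimal sketch of that polling pattern, with the compute client replaced by a hypothetical `fetch_status` callable (everything here is illustrative, not part of the script):

```python
import time

def wait_for_operation(fetch_status, timeout_sec=60, poll_interval_sec=2):
    """Polls fetch_status() until it reports DONE or the timeout elapses."""
    start_time = time.time()
    while time.time() - start_time <= timeout_sec:
        result = fetch_status()
        if result['status'] == 'DONE':
            # Operations can finish carrying an error payload; surface it.
            if 'error' in result:
                raise Exception(result['error'])
            return
        time.sleep(poll_interval_sec)
    raise Exception('Operation did not complete within %d seconds' %
                    timeout_sec)

# Example with a canned status source:
statuses = iter([{'status': 'RUNNING'}, {'status': 'DONE'}])
wait_for_operation(lambda: next(statuses), poll_interval_sec=0)
```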
diff --git a/grpc/tools/run_tests/sanity/check_bad_dependencies.sh b/grpc/tools/run_tests/sanity/check_bad_dependencies.sh
new file mode 100755
index 00000000..588b3e30
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_bad_dependencies.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Make sure that there is no path from known unsecure libraries and targets
+# to an SSL library. Any failure among these will make the script fail.
+
+test "$(bazel query 'somepath("//:grpc_unsecure", "//external:libssl")' 2>/dev/null | wc -l)" -eq 0 || exit 1
+test "$(bazel query 'somepath("//:grpc++_unsecure", "//external:libssl")' 2>/dev/null | wc -l)" -eq 0 || exit 1
+test "$(bazel query 'somepath("//:grpc++_codegen_proto", "//external:libssl")' 2>/dev/null | wc -l)" -eq 0 || exit 1
+test "$(bazel query 'somepath("//test/cpp/microbenchmarks:helpers", "//external:libssl")' 2>/dev/null | wc -l)" -eq 0 || exit 1
+
+# Make sure that core doesn't depend on anything in C++ library
+
+test "$(bazel query 'deps("//:grpc")' 2>/dev/null | grep -Ec 'src/cpp|include/grpcpp')" -eq 0 || exit 1
+
+exit 0
+
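The script's core trick is that `bazel query 'somepath(A, B)'` prints nothing when no dependency path exists, so an empty line count means the check passes. A hedged Python 3 sketch of the same check driven through subprocess (it assumes a `bazel` binary on PATH and is not part of the repository):

```python
import subprocess

def has_dependency_path(from_target, to_target):
    """True if bazel can find any dependency path between the two targets."""
    query = 'somepath("%s", "%s")' % (from_target, to_target)
    output = subprocess.check_output(['bazel', 'query', query],
                                     stderr=subprocess.DEVNULL)
    return bool(output.strip())

if has_dependency_path('//:grpc_unsecure', '//external:libssl'):
    raise SystemExit('//:grpc_unsecure must not depend on //external:libssl')
```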
diff --git a/grpc/tools/run_tests/sanity/check_bazel_workspace.py b/grpc/tools/run_tests/sanity/check_bazel_workspace.py
new file mode 100755
index 00000000..641692dc
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_bazel_workspace.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import ast
+import os
+import re
+import subprocess
+import sys
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+
+git_hash_pattern = re.compile('[0-9a-f]{40}')
+
+# Parse git hashes from submodules
+git_submodules = subprocess.check_output('git submodule',
+ shell=True).strip().split('\n')
+git_submodule_hashes = {
+ re.search(git_hash_pattern, s).group() for s in git_submodules
+}
+
+_BAZEL_SKYLIB_DEP_NAME = 'bazel_skylib'
+_BAZEL_TOOLCHAINS_DEP_NAME = 'bazel_toolchains'
+_TWISTED_TWISTED_DEP_NAME = 'com_github_twisted_twisted'
+_YAML_PYYAML_DEP_NAME = 'com_github_yaml_pyyaml'
+_TWISTED_INCREMENTAL_DEP_NAME = 'com_github_twisted_incremental'
+_ZOPEFOUNDATION_ZOPE_INTERFACE_DEP_NAME = 'com_github_zopefoundation_zope_interface'
+_TWISTED_CONSTANTLY_DEP_NAME = 'com_github_twisted_constantly'
+
+_GRPC_DEP_NAMES = [
+ 'upb',
+ 'boringssl',
+ 'zlib',
+ 'com_google_protobuf',
+ 'com_github_google_googletest',
+ 'rules_cc',
+ 'com_github_gflags_gflags',
+ 'com_github_google_benchmark',
+ 'com_github_cares_cares',
+ 'com_google_absl',
+ 'io_opencensus_cpp',
+ 'envoy_api',
+ _BAZEL_SKYLIB_DEP_NAME,
+ _BAZEL_TOOLCHAINS_DEP_NAME,
+ _TWISTED_TWISTED_DEP_NAME,
+ _YAML_PYYAML_DEP_NAME,
+ _TWISTED_INCREMENTAL_DEP_NAME,
+ _ZOPEFOUNDATION_ZOPE_INTERFACE_DEP_NAME,
+ _TWISTED_CONSTANTLY_DEP_NAME,
+ 'io_bazel_rules_go',
+ 'build_bazel_rules_apple',
+ 'build_bazel_apple_support',
+ 'libuv',
+]
+
+_GRPC_BAZEL_ONLY_DEPS = [
+ 'rules_cc',
+ 'com_google_absl',
+ 'io_opencensus_cpp',
+ _BAZEL_SKYLIB_DEP_NAME,
+ _BAZEL_TOOLCHAINS_DEP_NAME,
+ _TWISTED_TWISTED_DEP_NAME,
+ _YAML_PYYAML_DEP_NAME,
+ _TWISTED_INCREMENTAL_DEP_NAME,
+ _ZOPEFOUNDATION_ZOPE_INTERFACE_DEP_NAME,
+ _TWISTED_CONSTANTLY_DEP_NAME,
+ 'io_bazel_rules_go',
+ 'build_bazel_rules_apple',
+ 'build_bazel_apple_support',
+]
+
+
+class BazelEvalState(object):
+
+ def __init__(self, names_and_urls, overridden_name=None):
+ self.names_and_urls = names_and_urls
+ self.overridden_name = overridden_name
+
+ def http_archive(self, **args):
+ self.archive(**args)
+
+ def new_http_archive(self, **args):
+ self.archive(**args)
+
+ def bind(self, **args):
+ pass
+
+ def existing_rules(self):
+ if self.overridden_name:
+ return [self.overridden_name]
+ return []
+
+ def archive(self, **args):
+ assert self.names_and_urls.get(args['name']) is None
+ if args['name'] in _GRPC_BAZEL_ONLY_DEPS:
+ self.names_and_urls[args['name']] = 'dont care'
+ return
+ self.names_and_urls[args['name']] = args['url']
+
+ def git_repository(self, **args):
+ assert self.names_and_urls.get(args['name']) is None
+ if args['name'] in _GRPC_BAZEL_ONLY_DEPS:
+ self.names_and_urls[args['name']] = 'dont care'
+ return
+ self.names_and_urls[args['name']] = args['remote']
+
+ def grpc_python_deps(self):
+ pass
+
+
+# Parse git hashes from bazel/grpc_deps.bzl {new_}http_archive rules
+with open(os.path.join('bazel', 'grpc_deps.bzl'), 'r') as f:
+ names_and_urls = {}
+ eval_state = BazelEvalState(names_and_urls)
+ bazel_file = f.read()
+
+# grpc_deps.bzl only defines 'grpc_deps' and 'grpc_test_only_deps'; add these
+# lines to call them.
+bazel_file += '\ngrpc_deps()\n'
+bazel_file += '\ngrpc_test_only_deps()\n'
+build_rules = {
+ 'native': eval_state,
+ 'http_archive': lambda **args: eval_state.http_archive(**args),
+ 'load': lambda a, b: None,
+ 'git_repository': lambda **args: eval_state.git_repository(**args),
+ 'grpc_python_deps': lambda: None,
+}
+exec(bazel_file) in build_rules
+for name in _GRPC_DEP_NAMES:
+ assert name in names_and_urls.keys()
+assert len(_GRPC_DEP_NAMES) == len(names_and_urls.keys())
+
+# There are some "bazel-only" deps that are exceptions to this sanity check;
+# we don't require a corresponding git submodule for these.
+names_without_bazel_only_deps = names_and_urls.keys()
+for dep_name in _GRPC_BAZEL_ONLY_DEPS:
+ names_without_bazel_only_deps.remove(dep_name)
+archive_urls = [names_and_urls[name] for name in names_without_bazel_only_deps]
+workspace_git_hashes = {
+ re.search(git_hash_pattern, url).group() for url in archive_urls
+}
+if len(workspace_git_hashes) == 0:
+ print("(Likely) parse error, did not find any bazel git dependencies.")
+ sys.exit(1)
+
+# Validate the equivalence of the git submodules and Bazel git dependencies.
+# The condition we impose is that there is a git submodule for every
+# dependency in the workspace, but not necessarily the converse. E.g. Bloaty
+# is a submodule that is not used by any of the targets built by Bazel.
+if len(workspace_git_hashes - git_submodule_hashes) > 0:
+ print(
+ "Found discrepancies between git submodules and Bazel WORKSPACE dependencies"
+ )
+
+# Also check that we can override each dependency
+for name in _GRPC_DEP_NAMES:
+ names_and_urls_with_overridden_name = {}
+ state = BazelEvalState(names_and_urls_with_overridden_name,
+ overridden_name=name)
+ rules = {
+ 'native': state,
+ 'http_archive': lambda **args: state.http_archive(**args),
+ 'load': lambda a, b: None,
+ 'git_repository': lambda **args: state.git_repository(**args),
+ 'grpc_python_deps': lambda *args, **kwargs: None,
+ }
+ exec(bazel_file) in rules
+ assert name not in names_and_urls_with_overridden_name.keys()
+
+sys.exit(0)
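The mechanism above, exec()ing the deps file against stub rule functions that only record what was declared, can be shown in isolation. A stripped-down sketch in Python 3 syntax (the original uses the Python 2 `exec ... in ...` form, and the rule text below is invented for the example):

```python
# Evaluate Starlark-like rule calls by exec()ing them against stub functions
# that record what was declared. The deps text below is a made-up example.
recorded = {}

def http_archive(**kwargs):
    recorded[kwargs['name']] = kwargs['url']

deps_text = '''
http_archive(
    name = "zlib",
    url = "https://example.invalid/zlib-1.2.11.tar.gz",
)
'''
exec(deps_text, {'http_archive': http_archive})
assert recorded == {'zlib': 'https://example.invalid/zlib-1.2.11.tar.gz'}
```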
diff --git a/grpc/tools/run_tests/sanity/check_buildifier.sh b/grpc/tools/run_tests/sanity/check_buildifier.sh
new file mode 100755
index 00000000..bb183711
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_buildifier.sh
@@ -0,0 +1,31 @@
+#! /bin/bash
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Checks whether the Bazel build files need to be reformatted.
+
+GIT_ROOT="$(dirname "$0")/../../.."
+"$GIT_ROOT/tools/distrib/buildifier_format_code.sh" -mode=diff
+result=$?
+
+if [[ ${result} != 0 ]]; then
+ echo "==========BUILDIFIER CHECK FAILED=========="
+ echo "Please try using the following script to fix automatically:"
+ echo ""
+ echo " tools/distrib/buildifier_format_code.sh"
+ echo ""
+ exit 1
+else
+ echo "==========BUILDIFIER CHECK PASSED=========="
+fi
diff --git a/grpc/tools/run_tests/sanity/check_cache_mk.sh b/grpc/tools/run_tests/sanity/check_cache_mk.sh
new file mode 100755
index 00000000..0d47e575
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_cache_mk.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+if [ -f cache.mk ] ; then
+ echo "Please don't commit cache.mk"
+ exit 1
+fi
+
diff --git a/grpc/tools/run_tests/sanity/check_deprecated_grpc++.py b/grpc/tools/run_tests/sanity/check_deprecated_grpc++.py
new file mode 100755
index 00000000..02307fed
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_deprecated_grpc++.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import os
+import sys
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+
+expected_files = [
+ "include/grpc++/create_channel_posix.h", "include/grpc++/server_builder.h",
+ "include/grpc++/resource_quota.h", "include/grpc++/create_channel.h",
+ "include/grpc++/alarm.h", "include/grpc++/server.h",
+ "include/grpc++/server_context.h", "include/grpc++/client_context.h",
+ "include/grpc++/server_posix.h", "include/grpc++/grpc++.h",
+ "include/grpc++/health_check_service_interface.h",
+ "include/grpc++/completion_queue.h", "include/grpc++/channel.h",
+ "include/grpc++/support/sync_stream.h", "include/grpc++/support/status.h",
+ "include/grpc++/support/config.h",
+ "include/grpc++/support/status_code_enum.h",
+ "include/grpc++/support/byte_buffer.h",
+ "include/grpc++/support/error_details.h",
+ "include/grpc++/support/async_unary_call.h",
+ "include/grpc++/support/channel_arguments.h",
+ "include/grpc++/support/async_stream.h", "include/grpc++/support/slice.h",
+ "include/grpc++/support/stub_options.h",
+ "include/grpc++/support/string_ref.h", "include/grpc++/support/time.h",
+ "include/grpc++/security/auth_metadata_processor.h",
+ "include/grpc++/security/credentials.h",
+ "include/grpc++/security/server_credentials.h",
+ "include/grpc++/security/auth_context.h",
+ "include/grpc++/impl/rpc_method.h",
+ "include/grpc++/impl/server_builder_option.h", "include/grpc++/impl/call.h",
+ "include/grpc++/impl/service_type.h", "include/grpc++/impl/grpc_library.h",
+ "include/grpc++/impl/client_unary_call.h",
+ "include/grpc++/impl/channel_argument_option.h",
+ "include/grpc++/impl/rpc_service_method.h",
+ "include/grpc++/impl/method_handler_impl.h",
+ "include/grpc++/impl/server_builder_plugin.h",
+ "include/grpc++/impl/server_initializer.h",
+ "include/grpc++/impl/serialization_traits.h",
+ "include/grpc++/impl/codegen/sync_stream.h",
+ "include/grpc++/impl/codegen/channel_interface.h",
+ "include/grpc++/impl/codegen/config_protobuf.h",
+ "include/grpc++/impl/codegen/status.h",
+ "include/grpc++/impl/codegen/core_codegen.h",
+ "include/grpc++/impl/codegen/config.h",
+ "include/grpc++/impl/codegen/core_codegen_interface.h",
+ "include/grpc++/impl/codegen/status_code_enum.h",
+ "include/grpc++/impl/codegen/metadata_map.h",
+ "include/grpc++/impl/codegen/rpc_method.h",
+ "include/grpc++/impl/codegen/server_context.h",
+ "include/grpc++/impl/codegen/byte_buffer.h",
+ "include/grpc++/impl/codegen/async_unary_call.h",
+ "include/grpc++/impl/codegen/server_interface.h",
+ "include/grpc++/impl/codegen/call.h",
+ "include/grpc++/impl/codegen/client_context.h",
+ "include/grpc++/impl/codegen/service_type.h",
+ "include/grpc++/impl/codegen/grpc_library.h",
+ "include/grpc++/impl/codegen/async_stream.h",
+ "include/grpc++/impl/codegen/slice.h",
+ "include/grpc++/impl/codegen/client_unary_call.h",
+ "include/grpc++/impl/codegen/proto_utils.h",
+ "include/grpc++/impl/codegen/stub_options.h",
+ "include/grpc++/impl/codegen/rpc_service_method.h",
+ "include/grpc++/impl/codegen/method_handler_impl.h",
+ "include/grpc++/impl/codegen/string_ref.h",
+ "include/grpc++/impl/codegen/completion_queue_tag.h",
+ "include/grpc++/impl/codegen/call_hook.h",
+ "include/grpc++/impl/codegen/completion_queue.h",
+ "include/grpc++/impl/codegen/serialization_traits.h",
+ "include/grpc++/impl/codegen/create_auth_context.h",
+ "include/grpc++/impl/codegen/time.h",
+ "include/grpc++/impl/codegen/security/auth_context.h",
+ "include/grpc++/ext/health_check_service_server_builder_option.h",
+ "include/grpc++/ext/proto_server_reflection_plugin.h",
+ "include/grpc++/generic/async_generic_service.h",
+ "include/grpc++/generic/generic_stub.h",
+ "include/grpc++/test/mock_stream.h",
+ "include/grpc++/test/server_context_test_spouse.h"
+]
+
+file_template = '''/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// DEPRECATED: The headers in include/grpc++ are deprecated. Please include the
+// headers in include/grpcpp instead. This header exists only for backwards
+// compatibility.
+
+#ifndef GRPCXX_FILE_PATH_NAME_UPPER
+#define GRPCXX_FILE_PATH_NAME_UPPER
+
+#include <grpcpp/FILE_PATH_NAME_LOWER>
+
+#endif // GRPCXX_FILE_PATH_NAME_UPPER
+'''
+
+errors = 0
+
+path_files = []
+for root, dirs, files in os.walk('include/grpc++'):
+ for filename in files:
+ path_file = os.path.join(root, filename)
+ path_files.append(path_file)
+
+# Note: list.sort() returns None, so comparing the results of .sort() would
+# always pass; sorted() returns the sorted lists we actually want to compare.
+if sorted(path_files) != sorted(expected_files):
+ diff_plus = [file for file in path_files if file not in expected_files]
+ diff_minus = [file for file in expected_files if file not in path_files]
+ for file in diff_minus:
+ print('- ', file)
+ for file in diff_plus:
+ print('+ ', file)
+ errors += 1
+
+if errors > 0:
+ sys.exit(errors)
+
+for path_file in expected_files:
+ relative_path_file = path_file.split('/', 2)[2]
+
+ replace_lower = relative_path_file.replace('+', 'p')
+
+ replace_upper = relative_path_file.replace('/', '_')
+ replace_upper = replace_upper.replace('.', '_')
+ replace_upper = replace_upper.upper().replace('+', 'X')
+
+ expected_content = file_template.replace('FILE_PATH_NAME_LOWER',
+ replace_lower)
+ expected_content = expected_content.replace('FILE_PATH_NAME_UPPER',
+ replace_upper)
+
+ path_file_expected = path_file + '.expected'
+ with open(path_file_expected, "w") as fo:
+ fo.write(expected_content)
+
+ if 0 != os.system('diff %s %s' % (path_file_expected, path_file)):
+ print('Difference found in file:', path_file)
+ errors += 1
+
+ os.remove(path_file_expected)
+
+check_extensions = [".h", ".cc", ".c", ".m"]
+
+for root, dirs, files in os.walk('src'):
+ for filename in files:
+ path_file = os.path.join(root, filename)
+ for ext in check_extensions:
+ if path_file.endswith(ext):
+ try:
+ with open(path_file, "r") as fi:
+ content = fi.read()
+ if '#include <grpc++/' in content:
+ print(
+ 'Failed: invalid include of deprecated headers in include/grpc++ in %s'
+ % path_file)
+ errors += 1
+ except IOError:
+ pass
+
+sys.exit(errors)
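For each deprecated header the script derives both the grpcpp include path and the include-guard macro purely from the file path. A worked example of that mangling for one representative header (reproducing the transforms above, not adding new ones):

```python
# Reproduces the path mangling above for a single header.
path_file = 'include/grpc++/impl/codegen/async_stream.h'
relative = path_file.split('/', 2)[2]        # 'impl/codegen/async_stream.h'
lower = relative.replace('+', 'p')           # include path under grpcpp/
upper = relative.replace('/', '_').replace('.', '_').upper().replace('+', 'X')
assert lower == 'impl/codegen/async_stream.h'
assert upper == 'IMPL_CODEGEN_ASYNC_STREAM_H'
# The guard in the generated stub is then 'GRPCXX_' + upper.
```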
diff --git a/grpc/tools/run_tests/sanity/check_owners.sh b/grpc/tools/run_tests/sanity/check_owners.sh
new file mode 100755
index 00000000..de0e0925
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_owners.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+export TEST=true
+
+cd "$(dirname "$0")/../../.."
+
+owners=.github/CODEOWNERS
+want_owners=$(mktemp /tmp/submXXXXXX)
+
+tools/mkowners/mkowners.py -o "$want_owners"
+diff -u "$owners" "$want_owners"
+
+rm "$want_owners"
diff --git a/grpc/tools/run_tests/sanity/check_port_platform.py b/grpc/tools/run_tests/sanity/check_port_platform.py
new file mode 100755
index 00000000..79e7f9c4
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_port_platform.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+
+
+def check_port_platform_inclusion(directory_root):
+ bad_files = []
+ for root, dirs, files in os.walk(directory_root):
+ for filename in files:
+ path = os.path.join(root, filename)
+ if os.path.splitext(path)[1] not in ['.c', '.cc', '.h']: continue
+ if path in [
+ os.path.join('include', 'grpc', 'support',
+ 'port_platform.h'),
+ os.path.join('include', 'grpc', 'impl', 'codegen',
+ 'port_platform.h'),
+ ]:
+ continue
+ if filename.endswith('.pb.h') or filename.endswith('.pb.c'):
+ continue
+ # Skip check for upb generated code.
+ if filename.endswith('.upb.h') or filename.endswith('.upb.c'):
+ continue
+ with open(path) as f:
+ all_lines_in_file = f.readlines()
+ for index, l in enumerate(all_lines_in_file):
+ if '#include' in l:
+ if l not in [
+ '#include <grpc/support/port_platform.h>\n',
+ '#include <grpc/impl/codegen/port_platform.h>\n'
+ ]:
+ bad_files.append(path)
+ elif all_lines_in_file[index + 1] != '\n':
+ # Require a blank line after including port_platform.h in
+ # order to prevent the formatter from reordering its
+ # inclusion upon future changes.
+ bad_files.append(path)
+ break
+ return bad_files
+
+
+all_bad_files = []
+all_bad_files += check_port_platform_inclusion(os.path.join('src', 'core'))
+all_bad_files += check_port_platform_inclusion(os.path.join('include', 'grpc'))
+
+if len(all_bad_files) > 0:
+ for f in all_bad_files:
+ print(('port_platform.h is not the first included header or there '
+ 'is not a blank line following its inclusion in %s') % f)
+ sys.exit(1)
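The rule being enforced is small enough to state on its own: the first #include of a core file must be port_platform.h, and it must be followed by a blank line. A self-contained sketch of that predicate over in-memory lines, mirroring the walk above:

```python
# Stand-alone version of the first-include rule checked above.
ALLOWED_FIRST = (
    '#include <grpc/support/port_platform.h>\n',
    '#include <grpc/impl/codegen/port_platform.h>\n',
)

def first_include_ok(lines):
    """True if the first #include is port_platform.h followed by a blank."""
    for index, line in enumerate(lines):
        if '#include' in line:
            return (line in ALLOWED_FIRST and
                    index + 1 < len(lines) and lines[index + 1] == '\n')
    return True  # a file with no includes at all is acceptable

good = ['#include <grpc/support/port_platform.h>\n', '\n', 'int x;\n']
bad = ['#include <stdio.h>\n', '\n']
assert first_include_ok(good) and not first_include_ok(bad)
```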
diff --git a/grpc/tools/run_tests/sanity/check_qps_scenario_changes.py b/grpc/tools/run_tests/sanity/check_qps_scenario_changes.py
new file mode 100755
index 00000000..fffa813c
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_qps_scenario_changes.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import subprocess
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../../test/cpp/qps'))
+subprocess.call(['./json_run_localhost_scenario_gen.py'])
+subprocess.call(['./qps_json_driver_scenario_gen.py'])
+subprocess.call(['buildifier', '-v', '-r', '.'])
+
+output = subprocess.check_output(['git', 'status', '--porcelain'])
+qps_json_driver_bzl = 'test/cpp/qps/qps_json_driver_scenarios.bzl'
+json_run_localhost_bzl = 'test/cpp/qps/json_run_localhost_scenarios.bzl'
+
+if qps_json_driver_bzl in output or json_run_localhost_bzl in output:
+ print('qps benchmark scenarios have been updated, please commit '
+ 'test/cpp/qps/qps_json_driver_scenarios.bzl and/or '
+ 'test/cpp/qps/json_run_localhost_scenarios.bzl')
+ sys.exit(1)
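This check uses a regenerate-then-diff idiom: rerun the generators, then fail if `git status --porcelain` reports the checked-in outputs as modified. A generic, hedged sketch of that idiom (the generator command and watched paths here are placeholders, not the real ones):

```python
# Generic regenerate-then-diff check; the generator list is hypothetical.
import subprocess
import sys

GENERATORS = [['./gen_scenarios.py']]          # placeholder commands
WATCHED = ['test/cpp/qps/qps_json_driver_scenarios.bzl']

for cmd in GENERATORS:
    subprocess.call(cmd)

dirty = subprocess.check_output(['git', 'status', '--porcelain'])
if any(path.encode() in dirty for path in WATCHED):
    print('generated files are stale; please regenerate and commit them')
    sys.exit(1)
```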
diff --git a/grpc/tools/run_tests/sanity/check_shellcheck.sh b/grpc/tools/run_tests/sanity/check_shellcheck.sh
new file mode 100755
index 00000000..b94d8227
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_shellcheck.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+ROOT="$(dirname "$0")/../../.."
+
+DIRS=(
+ 'test'
+ 'tools/gce'
+ 'tools/run_tests'
+)
+
+for dir in "${DIRS[@]}"; do
+ find "$ROOT/$dir/" -name "*.sh" -type f -print0 | xargs -n1 -0 shellcheck
+done
diff --git a/grpc/tools/run_tests/sanity/check_submodules.sh b/grpc/tools/run_tests/sanity/check_submodules.sh
new file mode 100755
index 00000000..05a28eae
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_submodules.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+export TEST=true
+
+cd "$(dirname "$0")/../../.."
+
+submodules=$(mktemp /tmp/submXXXXXX)
+want_submodules=$(mktemp /tmp/submXXXXXX)
+
+git submodule | awk '{ print $1 }' | sort > "$submodules"
+cat << EOF | awk '{ print $1 }' | sort > "$want_submodules"
+ df3ea785d8c30a9503321a3d35ee7d35808f190d third_party/abseil-cpp (heads/master)
+ 090faecb454fbd6e6e17a75ef8146acb037118d4 third_party/benchmark (v1.5.0)
+ 73594cde8c9a52a102c4341c244c833aa61b9c06 third_party/bloaty (remotes/origin/wide-14-g73594cd)
+ 1c2769383f027befac5b75b6cedd25daf3bf4dcf third_party/boringssl-with-bazel (remotes/origin/master-with-bazel)
+ e982924acee7f7313b4baa4ee5ec000c5e373c30 third_party/cares/cares (cares-1_15_0)
+ 0487bbb43c3e8b54c7332f74ba7344d8265774f7 third_party/envoy-api (heads/master)
+ 28f50e0fed19872e0fd50dd23ce2ee8cd759338e third_party/gflags (v2.2.0-5-g30dbc81)
+ 80ed4d0bbf65d57cc267dfc63bd2584557f11f9b third_party/googleapis (common-protos-1_3_1-915-g80ed4d0bb)
+ c9ccac7cb7345901884aabf5d1a786cfa6e2f397 third_party/googletest (6e2f397)
+ 15ae750151ac9341e5945eb38f8982d59fb99201 third_party/libuv (v1.34.0)
+ fe1790ca0df67173702f70d5646b82f48f412b99 protobuf (v3.7.0-rc.2-756-gfe1790ca0)
+ c0a080f4bea50bc087cdd8551143ee538cf3459e third_party/protoc-gen-validate (v0.0.10)
+ db4b343e48c1264bb4d9ff491b059300701dc7c7 third_party/udpa (heads/master)
+ cacf7f1d4e3d44d871b605da3b647f07d718623f third_party/zlib (v1.2.11)
+EOF
+
+diff -u "$submodules" "$want_submodules"
+
+rm "$submodules" "$want_submodules"
diff --git a/grpc/tools/run_tests/sanity/check_test_filtering.py b/grpc/tools/run_tests/sanity/check_test_filtering.py
new file mode 100755
index 00000000..0b6b77ec
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_test_filtering.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import unittest
+import re
+
+# hack import paths to pick up extra code
+sys.path.insert(0, os.path.abspath('tools/run_tests/'))
+from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs
+import python_utils.filter_pull_request_tests as filter_pull_request_tests
+
+_LIST_OF_LANGUAGE_LABELS = [
+ 'c', 'c++', 'csharp', 'grpc-node', 'objc', 'php', 'php7', 'python', 'ruby'
+]
+_LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows']
+
+
+class TestFilteringTest(unittest.TestCase):
+
+ def generate_all_tests(self):
+ all_jobs = _create_test_jobs() + _create_portability_test_jobs()
+ self.assertIsNotNone(all_jobs)
+ return all_jobs
+
+ def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
+ """
+ Default args should filter no tests because changed_files is empty and
+ the default labels should be able to match all jobs.
+ :param changed_files: mock list of changed_files from pull request
+ :param labels: list of job labels that should be skipped
+ """
+ all_jobs = self.generate_all_tests()
+
+ # Replace the _get_changed_files function so that the changed files
+ # seen by filter_tests can be specified by each test.
+ def _get_changed_files(foo):
+ return changed_files
+
+ filter_pull_request_tests._get_changed_files = _get_changed_files
+ print()
+ filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
+
+ # Make sure sanity tests aren't being filtered out
+ sanity_tests_in_all_jobs = 0
+ sanity_tests_in_filtered_jobs = 0
+ for job in all_jobs:
+ if "sanity" in job.labels:
+ sanity_tests_in_all_jobs += 1
+ all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
+ for job in filtered_jobs:
+ if "sanity" in job.labels:
+ sanity_tests_in_filtered_jobs += 1
+ filtered_jobs = [
+ job for job in filtered_jobs if "sanity" not in job.labels
+ ]
+ self.assertEquals(sanity_tests_in_all_jobs,
+ sanity_tests_in_filtered_jobs)
+
+ for label in labels:
+ for job in filtered_jobs:
+ self.assertNotIn(label, job.labels)
+
+ jobs_matching_labels = 0
+ for label in labels:
+ for job in all_jobs:
+ if (label in job.labels):
+ jobs_matching_labels += 1
+ self.assertEquals(len(filtered_jobs),
+ len(all_jobs) - jobs_matching_labels)
+
+ def test_individual_language_filters(self):
+ # Changing unlisted file should trigger all languages
+ self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
+ # Changing core should trigger all tests
+ self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
+ # Testing individual languages
+ self.test_filtering(['test/core/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CORE_TEST_SUITE.labels +
+ filter_pull_request_tests._CPP_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/cpp/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/csharp/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/objective-c/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/php/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._PHP_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/python/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/ruby/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
+ ])
+
+ def test_combined_language_filters(self):
+ self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels and
+ label not in filter_pull_request_tests._CORE_TEST_SUITE.labels
+ ])
+ self.test_filtering(['src/cpp/foo.bar', "src/csharp/foo.bar"], [
+ label for label in _LIST_OF_LANGUAGE_LABELS
+ if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels and
+ label not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
+ ])
+ self.test_filtering([
+ 'src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar",
+ "src/ruby/foo.bar"
+ ], [
+ label for label in _LIST_OF_LANGUAGE_LABELS if
+ label not in filter_pull_request_tests._OBJC_TEST_SUITE.labels and
+ label not in filter_pull_request_tests._PHP_TEST_SUITE.labels and
+ label not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels and
+ label not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
+ ])
+
+ def test_platform_filter(self):
+ self.test_filtering(['vsprojects/foo.bar'], [
+ label for label in _LIST_OF_PLATFORM_LABELS
+ if label not in filter_pull_request_tests._WINDOWS_TEST_SUITE.labels
+ ])
+
+ def test_whitelist(self):
+ whitelist = filter_pull_request_tests._WHITELIST_DICT
+ files_that_should_trigger_all_tests = [
+ 'src/core/foo.bar', 'some_file_not_on_the_white_list', 'BUILD',
+ 'etc/roots.pem', 'Makefile', 'tools/foo'
+ ]
+ for key in whitelist.keys():
+ for file_name in files_that_should_trigger_all_tests:
+ self.assertFalse(re.match(key, file_name))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/grpc/tools/run_tests/sanity/check_tracer_sanity.py b/grpc/tools/run_tests/sanity/check_tracer_sanity.py
new file mode 100755
index 00000000..c4c76530
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_tracer_sanity.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import os
+import sys
+import re
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+
+errors = 0
+tracers = []
+pattern = re.compile(r'GRPC_TRACER_INITIALIZER\((true|false), "(.*)"\)')
+for root, dirs, files in os.walk('src/core'):
+ for filename in files:
+ path = os.path.join(root, filename)
+ if os.path.splitext(path)[1] != '.c': continue
+ with open(path) as f:
+ text = f.read()
+ for o in pattern.findall(text):
+ tracers.append(o[1])
+
+with open('doc/environment_variables.md') as f:
+ text = f.read()
+
+for t in tracers:
+ if t not in text:
+ print(
+ "ERROR: tracer \"%s\" is not mentioned in doc/environment_variables.md"
+ % t)
+ errors += 1
+
+assert errors == 0
diff --git a/grpc/tools/run_tests/sanity/check_version.py b/grpc/tools/run_tests/sanity/check_version.py
new file mode 100755
index 00000000..d53fa094
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/check_version.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import sys
+import yaml
+import os
+import re
+import subprocess
+
+errors = 0
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+
+# hack import paths to pick up extra code
+sys.path.insert(0, os.path.abspath('tools/buildgen/plugins'))
+from expand_version import Version
+
+try:
+ branch_name = subprocess.check_output('git rev-parse --abbrev-ref HEAD',
+ shell=True)
+except:
+ print('WARNING: not a git repository')
+ branch_name = None
+
+if branch_name is not None:
+ m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
+ if m:
+ print('RELEASE branch')
+ # version number should align with the branched version
+ check_version = lambda version: (version.major == int(m.group(1)) and
+ version.minor == int(m.group(2)))
+ warning = 'Version key "%%s" value "%%s" should have a major version %s and minor version %s' % (
+ m.group(1), m.group(2))
+ elif re.match(r'^debian/.*$', branch_name):
+ # no additional version checks for debian branches
+ check_version = lambda version: True
+ else:
+ # all other branches should have a -dev tag
+ check_version = lambda version: version.tag == 'dev'
+ warning = 'Version key "%s" value "%s" should have a -dev tag'
+else:
+ check_version = lambda version: True
+
+with open('build_handwritten.yaml', 'r') as f:
+ build_yaml = yaml.load(f.read())
+
+settings = build_yaml['settings']
+
+top_version = Version(settings['version'])
+if not check_version(top_version):
+ errors += 1
+ print(warning % ('version', top_version))
+
+for tag, value in settings.iteritems():
+ if re.match(r'^[a-z]+_version$', tag):
+ value = Version(value)
+ if tag != 'core_version':
+ if value.major != top_version.major:
+ errors += 1
+ print('major version mismatch on %s: %d vs %d' %
+ (tag, value.major, top_version.major))
+ if value.minor != top_version.minor:
+ errors += 1
+ print('minor version mismatch on %s: %d vs %d' %
+ (tag, value.minor, top_version.minor))
+ if not check_version(value):
+ errors += 1
+ print(warning % (tag, value))
+
+sys.exit(errors)
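The branch gating above reduces to three cases: release branches pin the major/minor numbers, debian branches are exempt, and every other branch must carry a -dev tag. A compact sketch of that dispatch, with Version stubbed as a namedtuple since the real class lives in expand_version:

```python
import re
from collections import namedtuple

Version = namedtuple('Version', 'major minor tag')  # stub for expand_version

def checker_for_branch(branch_name):
    """Returns a predicate that validates a Version for this branch."""
    m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
    if m:
        major, minor = int(m.group(1)), int(m.group(2))
        return lambda v: v.major == major and v.minor == minor
    if re.match(r'^debian/.*$', branch_name):
        return lambda v: True
    return lambda v: v.tag == 'dev'

assert checker_for_branch('release-1_30')(Version(1, 30, ''))
assert checker_for_branch('master')(Version(1, 31, 'dev'))
```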
diff --git a/grpc/tools/run_tests/sanity/core_banned_functions.py b/grpc/tools/run_tests/sanity/core_banned_functions.py
new file mode 100755
index 00000000..934c4089
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/core_banned_functions.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import os
+import sys
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+
+# map of banned function signature to whitelist
+BANNED_EXCEPT = {
+ 'grpc_slice_from_static_buffer(': ['src/core/lib/slice/slice.cc'],
+ 'grpc_resource_quota_ref(': ['src/core/lib/iomgr/resource_quota.cc'],
+ 'grpc_resource_quota_unref(': [
+ 'src/core/lib/iomgr/resource_quota.cc', 'src/core/lib/surface/server.cc'
+ ],
+ 'grpc_slice_buffer_destroy(': ['src/core/lib/slice/slice_buffer.cc'],
+ 'grpc_slice_buffer_reset_and_unref(': [
+ 'src/core/lib/slice/slice_buffer.cc'
+ ],
+ 'grpc_slice_ref(': ['src/core/lib/slice/slice.cc'],
+ 'grpc_slice_unref(': ['src/core/lib/slice/slice.cc'],
+ 'grpc_error_create(': [
+ 'src/core/lib/iomgr/error.cc', 'src/core/lib/iomgr/error_cfstream.cc'
+ ],
+ 'grpc_error_ref(': ['src/core/lib/iomgr/error.cc'],
+ 'grpc_error_unref(': ['src/core/lib/iomgr/error.cc'],
+ 'grpc_os_error(': ['src/core/lib/iomgr/error.cc'],
+ 'grpc_wsa_error(': ['src/core/lib/iomgr/error.cc'],
+ 'grpc_log_if_error(': ['src/core/lib/iomgr/error.cc'],
+ 'grpc_slice_malloc(': ['src/core/lib/slice/slice.cc'],
+ 'grpc_call_cancel_internal(': ['src/core/lib/surface/call.cc'],
+ 'grpc_closure_create(': ['src/core/lib/iomgr/closure.cc'],
+ 'grpc_closure_init(': ['src/core/lib/iomgr/closure.cc'],
+ 'grpc_closure_sched(': ['src/core/lib/iomgr/closure.cc'],
+ 'grpc_closure_run(': ['src/core/lib/iomgr/closure.cc'],
+ 'grpc_closure_list_sched(': ['src/core/lib/iomgr/closure.cc'],
+}
+
+errors = 0
+num_files = 0
+for root, dirs, files in os.walk('src/core'):
+ if root.startswith('src/core/tsi'): continue
+ for filename in files:
+ num_files += 1
+ path = os.path.join(root, filename)
+ if os.path.splitext(path)[1] != '.cc': continue
+ with open(path) as f:
+ text = f.read()
+ for banned, exceptions in BANNED_EXCEPT.items():
+ if path in exceptions: continue
+ if banned in text:
+ print('Illegal use of "%s" in %s' % (banned, path))
+ errors += 1
+
+assert errors == 0
+# This check comes about from this issue:
+# https://github.com/grpc/grpc/issues/15381
+# Basically, a change rendered this script useless and we did not realize it.
+# This dumb check ensures that this type of issue doesn't occur again.
+assert num_files > 300 # we definitely have more than 300 files
diff --git a/grpc/tools/run_tests/sanity/core_untyped_structs.sh b/grpc/tools/run_tests/sanity/core_untyped_structs.sh
new file mode 100755
index 00000000..634d58df
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/core_untyped_structs.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+cd "$(dirname "$0")/../../.."
+
+#
+# Make sure that all core struct/unions have a name or are typedef'ed
+#
+
+grep -EIrn '(struct|union) *{' include/grpc |
+ grep -Ev typedef |
+ diff - /dev/null
+
diff --git a/grpc/tools/run_tests/sanity/cpp_banned_constructs.sh b/grpc/tools/run_tests/sanity/cpp_banned_constructs.sh
new file mode 100755
index 00000000..30852a7f
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/cpp_banned_constructs.sh
@@ -0,0 +1,49 @@
+#!/bin/sh
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+cd "$(dirname "$0")/../../.."
+
+#
+# Prevent the use of synchronization and threading constructs from std:: since
+# the code should be using grpc_core::Mutex, grpc::internal::Mutex,
+# grpc_core::Thread, etc.
+#
+
+grep -EIrn \
+ 'std::(mutex|condition_variable|lock_guard|unique_lock|thread)' \
+ include/grpc include/grpcpp src/core src/cpp | \
+ grep -Ev include/grpcpp/impl/codegen/sync.h | \
+ diff - /dev/null
+
+#
+# Prevent the include of disallowed C++ headers.
+#
+
+grep -EIrn \
+ '^#include (<mutex>|<condition_variable>|<thread>|<ratio>|<filesystem>|<future>|<system_error>)' \
+ include/grpc include/grpcpp src/core src/cpp | \
+ grep -Ev include/grpcpp/impl/codegen/sync.h | \
+ diff - /dev/null
+
+#
+# Prevent the include of headers that shouldn't be used in tests.
+#
+
+grep -EIrn \
+ '^#include (<pthread.h>)' \
+ test | \
+ diff - /dev/null
diff --git a/grpc/tools/run_tests/sanity/sanity_tests.yaml b/grpc/tools/run_tests/sanity/sanity_tests.yaml
new file mode 100644
index 00000000..e8565439
--- /dev/null
+++ b/grpc/tools/run_tests/sanity/sanity_tests.yaml
@@ -0,0 +1,31 @@
+# A set of sanity tests that are run in parallel.
+- script: tools/run_tests/sanity/check_bad_dependencies.sh
+- script: tools/run_tests/sanity/check_bazel_workspace.py
+- script: tools/run_tests/sanity/check_buildifier.sh
+- script: tools/run_tests/sanity/check_cache_mk.sh
+- script: tools/run_tests/sanity/check_deprecated_grpc++.py
+- script: tools/run_tests/sanity/check_owners.sh
+- script: tools/run_tests/sanity/check_port_platform.py
+- script: tools/run_tests/sanity/check_qps_scenario_changes.py
+- script: tools/run_tests/sanity/check_shellcheck.sh
+- script: tools/run_tests/sanity/check_submodules.sh
+- script: tools/run_tests/sanity/check_test_filtering.py
+- script: tools/run_tests/sanity/check_tracer_sanity.py
+- script: tools/run_tests/sanity/core_banned_functions.py
+- script: tools/run_tests/sanity/core_untyped_structs.sh
+- script: tools/run_tests/sanity/cpp_banned_constructs.sh
+- script: tools/buildgen/generate_projects.sh -j 3
+  cpu_cost: 3
+- script: tools/distrib/check_copyright.py
+- script: tools/distrib/check_include_guards.py
+- script: tools/distrib/check_trailing_newlines.sh
+- script: tools/distrib/check_upb_output.sh
+- script: tools/distrib/check_pytype.sh
+- script: tools/distrib/clang_format_code.sh
+- script: tools/distrib/clang_tidy_code.sh
+- script: tools/distrib/pylint_code.sh
+- script: tools/distrib/python/check_grpcio_tools.py
+- script: tools/distrib/yapf_code.sh --diff
+  cpu_cost: 1000
+- script: tools/distrib/check_protobuf_pod_version.sh
+- script: tools/distrib/check_boringssl_prefix_symbol.sh
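Each entry above names a script; the optional cpu_cost tells the scheduler how many CPU slots the script consumes, so generate_projects.sh reserves 3 and the extreme cost on yapf effectively runs it with nothing else in parallel. A minimal sketch of consuming this file, assuming PyYAML is available (the print stands in for actual job submission by the run_tests sanity suite):

import shlex

import yaml

with open('tools/run_tests/sanity/sanity_tests.yaml') as f:
    tests = yaml.safe_load(f)

for test in tests:
    cmd = shlex.split(test['script'])
    cpu_cost = test.get('cpu_cost', 1)  # default weight of 1 CPU slot
    print('would schedule %r with cpu_cost=%s' % (cmd, cpu_cost))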
diff --git a/grpc/tools/run_tests/start_port_server.py b/grpc/tools/run_tests/start_port_server.py
new file mode 100755
index 00000000..cca9859d
--- /dev/null
+++ b/grpc/tools/run_tests/start_port_server.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Wrapper around port server starting code.
+
+Used by developers who wish to run individual C/C++ tests outside of the
+run_tests.py infrastructure.
+
+The path to this file is referenced in test/core/util/port.c and is printed
+in an error message shown to users.
+"""
+
+import python_utils.start_port_server as start_port_server
+
+start_port_server.start_port_server()
+
+print("Port server started successfully")
diff --git a/grpc/tools/run_tests/task_runner.py b/grpc/tools/run_tests/task_runner.py
new file mode 100755
index 00000000..067e11a8
--- /dev/null
+++ b/grpc/tools/run_tests/task_runner.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Runs selected gRPC test/build tasks."""
+
+from __future__ import print_function
+
+import argparse
+import multiprocessing
+import sys
+
+import artifacts.artifact_targets as artifact_targets
+import artifacts.distribtest_targets as distribtest_targets
+import artifacts.package_targets as package_targets
+import python_utils.jobset as jobset
+import python_utils.report_utils as report_utils
+
+_TARGETS = []
+_TARGETS += artifact_targets.targets()
+_TARGETS += distribtest_targets.targets()
+_TARGETS += package_targets.targets()
+
+
+def _create_build_map():
+ """Maps task names and labels to list of tasks to be built."""
+ target_build_map = dict([(target.name, [target]) for target in _TARGETS])
+ if len(_TARGETS) > len(target_build_map.keys()):
+ raise Exception('Target names need to be unique')
+
+ label_build_map = {}
+ label_build_map['all'] = [t for t in _TARGETS] # to build all targets
+ for target in _TARGETS:
+ for label in target.labels:
+ if label in label_build_map:
+ label_build_map[label].append(target)
+ else:
+ label_build_map[label] = [target]
+
+ if set(target_build_map.keys()).intersection(label_build_map.keys()):
+ raise Exception('Target names need to be distinct from label names')
+ return dict(target_build_map.items() + label_build_map.items())
+
+
+_BUILD_MAP = _create_build_map()
+
+argp = argparse.ArgumentParser(description='Runs build/test targets.')
+argp.add_argument('-b',
+                  '--build',
+                  choices=sorted(_BUILD_MAP.keys()),
+                  nargs='+',
+                  default=['all'],
+                  help='Target name or target label to build.')
+argp.add_argument('-f',
+                  '--filter',
+                  choices=sorted(_BUILD_MAP.keys()),
+                  nargs='+',
+                  default=[],
+                  help='Filter targets to build with AND semantics.')
+argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
+argp.add_argument('-t',
+                  '--travis',
+                  default=False,
+                  action='store_const',
+                  const=True)
+
+args = argp.parse_args()
+
+# Figure out which targets to build
+targets = []
+for label in args.build:
+    targets += _BUILD_MAP[label]
+
+# Among targets selected by -b, filter out those that don't match the filter
+targets = [t for t in targets if all(f in t.labels for f in args.filter)]
+targets = sorted(set(targets), key=lambda target: target.name)
+
+# Execute pre-build phase
+prebuild_jobs = []
+for target in targets:
+    prebuild_jobs += target.pre_build_jobspecs()
+if prebuild_jobs:
+    num_failures, _ = jobset.run(prebuild_jobs,
+                                 newline_on_success=True,
+                                 maxjobs=args.jobs)
+    if num_failures != 0:
+        jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
+        sys.exit(1)
+
+build_jobs = []
+for target in targets:
+    build_jobs.append(target.build_jobspec())
+if not build_jobs:
+    print('Nothing to build.')
+    sys.exit(1)
+
+jobset.message('START', 'Building targets.', do_newline=True)
+num_failures, resultset = jobset.run(build_jobs,
+                                     newline_on_success=True,
+                                     maxjobs=args.jobs)
+report_utils.render_junit_xml_report(resultset,
+                                     'report_taskrunner_sponge_log.xml',
+                                     suite_name='tasks')
+if num_failures == 0:
+    jobset.message('SUCCESS',
+                   'All targets built successfully.',
+                   do_newline=True)
+else:
+    jobset.message('FAILED', 'Failed to build targets.', do_newline=True)
+    sys.exit(1)
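To make the selection semantics above concrete: -b takes the union of all targets under the given names or labels, and -f then keeps only targets carrying every filter label (the AND semantics named in its help string). A self-contained toy illustration with made-up targets:

class FakeTarget(object):
    """Stands in for a real artifact/distribtest/package target."""

    def __init__(self, name, labels):
        self.name = name
        self.labels = labels

targets = [
    FakeTarget('artifact_python_linux_x64', ['artifact', 'python', 'linux']),
    FakeTarget('artifact_python_macos_x64', ['artifact', 'python', 'macos']),
    FakeTarget('artifact_ruby_linux_x64', ['artifact', 'ruby', 'linux']),
]

selected = list(targets)  # as if invoked with: -b all
filters = ['python', 'linux']  # as if invoked with: -f python linux
selected = [t for t in selected if all(f in t.labels for f in filters)]
print([t.name for t in selected])  # ['artifact_python_linux_x64']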