Diffstat (limited to 'grpc/tools/run_tests/run_microbenchmark.py')
-rwxr-xr-x  grpc/tools/run_tests/run_microbenchmark.py  23
1 file changed, 12 insertions, 11 deletions
diff --git a/grpc/tools/run_tests/run_microbenchmark.py b/grpc/tools/run_tests/run_microbenchmark.py
index a275e16b..7cd4b966 100755
--- a/grpc/tools/run_tests/run_microbenchmark.py
+++ b/grpc/tools/run_tests/run_microbenchmark.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,12 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import cgi
+import argparse
+import html
import multiprocessing
import os
import subprocess
import sys
-import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
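Note on the import change: cgi.escape was deprecated in Python 3.2 and removed in Python 3.8, so html.escape is the required replacement. The two differ slightly in quote handling; a minimal sketch:

    import html

    # html.escape defaults to quote=True and, with quoting on, escapes the
    # single quote as well as the double quote (cgi.escape only handled ").
    assert html.escape('<a href="x">') == '&lt;a href=&quot;x&quot;&gt;'
    assert html.escape("it's", quote=False) == "it's"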
@@ -41,7 +41,8 @@ def fnize(s):
out = ''
for c in s:
if c in '<>, /':
- if len(out) and out[-1] == '_': continue
+ if len(out) and out[-1] == '_':
+ continue
out += '_'
else:
out += c
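The fnize change above is formatting only (the one-line if ...: continue is split out per PEP 8). For reference, the whole helper reassembled from the hunk, with the trailing return assumed, plus an example of what it produces:

    def fnize(s):
        # Collapse '<', '>', ',', ' ' and '/' into single underscores so a
        # benchmark name can double as a file name.
        out = ''
        for c in s:
            if c in '<>, /':
                if len(out) and out[-1] == '_':
                    continue
                out += '_'
            else:
                out += c
        return out

    assert fnize('BM_Foo<T, U>/0') == 'BM_Foo_T_U_0'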
@@ -65,13 +66,13 @@ def heading(name):
def link(txt, tgt):
global index_html
- index_html += "<p><a href=\"%s\">%s</a></p>\n" % (cgi.escape(
- tgt, quote=True), cgi.escape(txt))
+ index_html += "<p><a href=\"%s\">%s</a></p>\n" % (html.escape(
+ tgt, quote=True), html.escape(txt))
def text(txt):
global index_html
- index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+ index_html += "<p><pre>%s</pre></p>\n" % html.escape(txt)
def _bazel_build_benchmark(bm_name, cfg):
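The quote=True argument matters in link() because tgt lands inside a double-quoted href attribute. A self-contained sketch of the same pattern (link_html is a hypothetical stand-in for link(), which appends to the global index_html instead of returning):

    import html

    def link_html(txt, tgt):
        # Escape tgt for use inside href="..." and txt for element content.
        return '<p><a href="%s">%s</a></p>\n' % (
            html.escape(tgt, quote=True), html.escape(txt))

    print(link_html('BM_Foo<int>', 'BM_Foo_int.txt'), end='')
    # -> <p><a href="BM_Foo_int.txt">BM_Foo&lt;int&gt;</a></p>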
@@ -94,7 +95,7 @@ def collect_latency(bm_name, args):
for line in subprocess.check_output([
'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
'--benchmark_list_tests'
- ]).splitlines():
+ ]).decode('UTF-8').splitlines():
link(line, '%s.txt' % fnize(line))
benchmarks.append(
jobset.JobSpec([
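The .decode('UTF-8') is the substantive Python 3 fix in this hunk: subprocess.check_output returns bytes, and without the decode the loop would hand bytes objects to link() and fnize(), both of which expect str and would fail under Python 3. A small demonstration, assuming a POSIX echo on PATH:

    import subprocess

    out = subprocess.check_output(['echo', 'BM_StreamingPingPong'])
    assert isinstance(out, bytes)  # bytes, not str, under Python 3
    assert out.decode('UTF-8').splitlines() == ['BM_StreamingPingPong']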
@@ -148,7 +149,7 @@ def collect_perf(bm_name, args):
for line in subprocess.check_output([
'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
'--benchmark_list_tests'
- ]).splitlines():
+ ]).decode('UTF-8').splitlines():
link(line, '%s.svg' % fnize(line))
benchmarks.append(
jobset.JobSpec([
@@ -198,7 +199,7 @@ def run_summary(bm_name, cfg, base_json_name):
]
if args.summary_time is not None:
cmd += ['--benchmark_min_time=%d' % args.summary_time]
- return subprocess.check_output(cmd)
+ return subprocess.check_output(cmd).decode('UTF-8')
def collect_summary(bm_name, args):
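Here the same fix is applied to a return value, so callers of run_summary receive str rather than bytes. On Python 3.6+ an equivalent option is to let subprocess do the decoding; a sketch:

    import subprocess
    import sys

    # encoding= makes check_output return str directly, matching
    # .decode('UTF-8') applied to the bytes result.
    out = subprocess.check_output(
        [sys.executable, '-c', 'print("ok")'], encoding='UTF-8')
    assert out == 'ok\n'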
@@ -213,7 +214,7 @@ def collect_summary(bm_name, args):
'tools/profiling/microbenchmarks/bm2bq.py',
'%s.counters.json' % bm_name,
'%s.opt.json' % bm_name
- ]))
+ ]).decode('UTF-8'))
subprocess.check_call([
'bq', 'load', 'microbenchmarks.microbenchmarks',
'%s.csv' % bm_name
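The final decode follows the same reasoning: the CSV that bm2bq.py emits is presumably written through a file object opened in text mode, and a text-mode write() accepts only str. A minimal sketch of that pattern (the file name is illustrative, not the script's real output path):

    import subprocess

    # A text-mode file rejects bytes, so the check_output result must be
    # decoded before f.write().
    with open('example.csv', 'w') as f:
        f.write(subprocess.check_output(['echo', 'a,b,c']).decode('UTF-8'))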