author     Luis Lozano <llozano@chromium.org>  2015-12-15 13:49:30 -0800
committer  Luis Lozano <llozano@chromium.org>  2015-12-16 17:36:06 +0000
commit     f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe (patch)
tree       185d243c7eed7c7a0db6f0e640746cadc1479ea9 /mem_tests
parent     2a66f70fef907c1cb15229cb58e5129cb620ac98 (diff)
download   toolchain-utils-f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe.tar.gz
Run pyformat on all the toolchain-utils files.
This gets rid of a lot of lint issues. Ran by doing this:

    for f in *.py; do echo -n "$f " ; if [ -x $f ]; then pyformat -i --remove_trailing_comma --yapf --force_quote_type=double $f ; else pyformat -i --remove_shebang --remove_trailing_comma --yapf --force_quote_type=double $f ; fi ; done

BUG=chromium:567921
TEST=Ran simple crosperf run.

Change-Id: I59778835fdaa5f706d2e1765924389f9e97433d1
Reviewed-on: https://chrome-internal-review.googlesource.com/242031
Reviewed-by: Luis Lozano <llozano@chromium.org>
Commit-Queue: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Yunlian Jiang <yunlian@google.com>
Diffstat (limited to 'mem_tests')
-rwxr-xr-x  mem_tests/clean_data.py          11
-rwxr-xr-x  mem_tests/mem_groups.py          47
-rwxr-xr-x  mem_tests/total_mem_actual.py    23
-rwxr-xr-x  mem_tests/total_mem_sampled.py   17
-rw-r--r--  mem_tests/utils.py               13
5 files changed, 53 insertions, 58 deletions
diff --git a/mem_tests/clean_data.py b/mem_tests/clean_data.py
index dc8a7b71..f9a11e75 100755
--- a/mem_tests/clean_data.py
+++ b/mem_tests/clean_data.py
@@ -1,5 +1,4 @@
#! /usr/bin/python
-
"""Cleans output from other scripts to eliminate duplicates.
When frequently sampling data, we see that records occasionally will contain
@@ -15,16 +14,16 @@ standard time.
import argparse
parser = argparse.ArgumentParser()
-parser.add_argument("filename")
+parser.add_argument('filename')
args = parser.parse_args()
my_file = open(args.filename)
-output_file = open("clean2.csv", "a")
+output_file = open('clean2.csv', 'a')
dictionary = dict()
for line in my_file:
- new_time = int(line.split(",")[0])
- dictionary[new_time] = line
+ new_time = int(line.split(',')[0])
+ dictionary[new_time] = line
for key in dictionary.keys():
- output_file.write(dictionary[key])
+ output_file.write(dictionary[key])
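For reference, a minimal sketch of the deduplication idea clean_data.py implements: each record is keyed by the integer timestamp in its first CSV field, so a later record with the same timestamp overwrites an earlier one. The input name 'sample.csv' below is only an assumed example; 'clean2.csv' is the output file the script actually appends to.

    # Hypothetical sketch, not part of this change.
    records = {}
    with open('sample.csv') as log:              # assumed example input
        for line in log:
            timestamp = int(line.split(',')[0])
            records[timestamp] = line            # duplicates overwrite earlier entries
    with open('clean2.csv', 'a') as out:
        for timestamp in records:
            out.write(records[timestamp])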
diff --git a/mem_tests/mem_groups.py b/mem_tests/mem_groups.py
index 75591182..6de76914 100755
--- a/mem_tests/mem_groups.py
+++ b/mem_tests/mem_groups.py
@@ -1,5 +1,4 @@
#! /usr/bin/python
-
"""Groups memory by allocation sizes.
Takes a log entry and sorts everything into groups based on what size
@@ -20,37 +19,37 @@ from datetime import datetime
pretty_print = True
parser = argparse.ArgumentParser()
-parser.add_argument("filename")
+parser.add_argument('filename')
args = parser.parse_args()
my_file = open(args.filename)
-output_file = open("groups.csv", "a")
+output_file = open('groups.csv', 'a')
# The cutoffs for each group in the output (in bytes)
groups = [1024, 8192, 65536, 524288, 4194304]
base_time = datetime(2014, 6, 11, 0, 0)
-prev_line = ""
+prev_line = ''
half_entry = (None, None)
for line in my_file:
- if "heap profile:" in line:
- if half_entry[0] is not None:
- group_totals = half_entry[1]
- total = sum(group_totals) * 1.0
- to_join = [half_entry[0]] + [value / total for value in group_totals]
- to_output = ",".join([str(elem) for elem in to_join])
- output_file.write(to_output)
- total_diff = compute_total_diff(line, base_time)
- half_entry = (total_diff, [0]*(len(groups) + 1))
- if "] @ " in line and "heap profile:" not in line:
- mem_samples = line.strip().split("[")[0]
- num_samples, total_mem = map(int, mem_samples.strip().split(":"))
- mem_per_sample = total_mem // num_samples
- group_totals = half_entry[1]
- for cutoff_index in range(len(groups)):
- if mem_per_sample <= groups[cutoff_index]:
- group_totals[cutoff_index] += total_mem
- break
- if mem_per_sample > groups[-1]:
- group_totals[-1] += total_mem
+ if 'heap profile:' in line:
+ if half_entry[0] is not None:
+ group_totals = half_entry[1]
+ total = sum(group_totals) * 1.0
+ to_join = [half_entry[0]] + [value / total for value in group_totals]
+ to_output = ','.join([str(elem) for elem in to_join])
+ output_file.write(to_output)
+ total_diff = compute_total_diff(line, base_time)
+ half_entry = (total_diff, [0] * (len(groups) + 1))
+ if '] @ ' in line and 'heap profile:' not in line:
+ mem_samples = line.strip().split('[')[0]
+ num_samples, total_mem = map(int, mem_samples.strip().split(':'))
+ mem_per_sample = total_mem // num_samples
+ group_totals = half_entry[1]
+ for cutoff_index in range(len(groups)):
+ if mem_per_sample <= groups[cutoff_index]:
+ group_totals[cutoff_index] += total_mem
+ break
+ if mem_per_sample > groups[-1]:
+ group_totals[-1] += total_mem
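For reference, a worked sketch of the bucketing mem_groups.py applies to each sample line: the average allocation size (total_mem // num_samples) selects the first cutoff it fits under, and anything above the last cutoff lands in the overflow bucket. The record values below are invented for illustration.

    # Hypothetical sketch, not part of this change.
    groups = [1024, 8192, 65536, 524288, 4194304]    # cutoffs in bytes, as in the script

    def bucket_index(mem_per_sample):
        """Index of the group an average allocation of this size falls into."""
        for i, cutoff in enumerate(groups):
            if mem_per_sample <= cutoff:
                return i
        return len(groups)                           # overflow bucket

    # e.g. a record with 100 samples totalling 409600 bytes averages 4096 bytes
    # per sample, so all 409600 bytes are added to bucket 1 (<= 8192 bytes).
    assert bucket_index(409600 // 100) == 1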
diff --git a/mem_tests/total_mem_actual.py b/mem_tests/total_mem_actual.py
index c9c51b16..2e836e88 100755
--- a/mem_tests/total_mem_actual.py
+++ b/mem_tests/total_mem_actual.py
@@ -1,5 +1,4 @@
#! /usr/bin/python
-
"""Parses the actual memory usage from TCMalloc.
This goes through logs that have the actual allocated memory (not sampled) in
@@ -17,22 +16,22 @@ from datetime import datetime
pretty_print = True
parser = argparse.ArgumentParser()
-parser.add_argument("filename")
+parser.add_argument('filename')
args = parser.parse_args()
my_file = open(args.filename)
-output_file = open("raw_memory_data.csv", "a")
+output_file = open('raw_memory_data.csv', 'a')
base_time = datetime(2014, 6, 11, 0, 0)
-prev_line = ""
+prev_line = ''
half_entry = (None, None)
for line in my_file:
- if "Output Heap Stats:" in line:
- total_diff = compute_total_diff(line, base_time)
- half_entry = (total_diff, None)
- if "Bytes in use by application" in line:
- total_diff = half_entry[0]
- memory_used = int(line.strip().split()[1])
- half_entry = (None, None)
- output_file.write("{0},{1}\n".format(total_diff, memory_used))
+ if 'Output Heap Stats:' in line:
+ total_diff = compute_total_diff(line, base_time)
+ half_entry = (total_diff, None)
+ if 'Bytes in use by application' in line:
+ total_diff = half_entry[0]
+ memory_used = int(line.strip().split()[1])
+ half_entry = (None, None)
+ output_file.write('{0},{1}\n'.format(total_diff, memory_used))
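For reference, a minimal sketch of the two-line pairing in total_mem_actual.py: a line containing 'Output Heap Stats:' supplies the timestamp, and the following 'Bytes in use by application' line supplies the byte count as the second whitespace-separated token. The sample line below is invented and only approximates TCMalloc's stats output.

    # Hypothetical sketch, not part of this change.
    line = 'MALLOC:      12345678 (   11.8 MiB) Bytes in use by application'
    if 'Bytes in use by application' in line:
        memory_used = int(line.strip().split()[1])   # second token is the byte count
        print(memory_used)                           # 12345678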
diff --git a/mem_tests/total_mem_sampled.py b/mem_tests/total_mem_sampled.py
index f8ed8013..c7336473 100755
--- a/mem_tests/total_mem_sampled.py
+++ b/mem_tests/total_mem_sampled.py
@@ -1,5 +1,4 @@
#! /usr/bin/python
-
"""Parses the total amount of sampled memory from log files.
This file outputs the total amount of memory that has been sampled by tcmalloc.
@@ -14,19 +13,19 @@ from utils import compute_total_diff
from datetime import datetime
parser = argparse.ArgumentParser()
-parser.add_argument("filename")
+parser.add_argument('filename')
args = parser.parse_args()
my_file = open(args.filename)
-output_file = open("memory_data.csv", "a")
+output_file = open('memory_data.csv', 'a')
base_time = datetime(2014, 6, 11, 0, 0)
-prev_line = ""
+prev_line = ''
half_entry = (None, None)
for line in my_file:
- if "heap profile: " not in line:
- continue
- memory_used = line.strip().split(":")[-1].strip().split("]")[0].strip()
- total_diff = compute_total_diff(line, base_time)
- output_file.write("{0},{1}\n".format(int(total_diff), memory_used))
+ if 'heap profile: ' not in line:
+ continue
+ memory_used = line.strip().split(':')[-1].strip().split(']')[0].strip()
+ total_diff = compute_total_diff(line, base_time)
+ output_file.write('{0},{1}\n'.format(int(total_diff), memory_used))
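For reference, a minimal sketch of how total_mem_sampled.py pulls the sampled byte count out of a TCMalloc heap-profile header: the value after the last colon, up to the closing bracket. The log line below is invented; its leading [pid:tid:MMDD/HHMMSS:severity:...] prefix is only an assumption about the log format.

    # Hypothetical sketch, not part of this change.
    line = ('[4688:4688:0611/155341:VERBOSE1:metrics.cc(151)] '
            'heap profile:    746:  114712 [   746:  114712] @ heap_v2/131072')
    memory_used = line.strip().split(':')[-1].strip().split(']')[0].strip()
    print(memory_used)                               # '114712'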
diff --git a/mem_tests/utils.py b/mem_tests/utils.py
index 54dbcc2d..38bd89ca 100644
--- a/mem_tests/utils.py
+++ b/mem_tests/utils.py
@@ -1,12 +1,11 @@
-#! /usr/bin/python
-
"""Utility functions for the memory tests.
"""
from datetime import datetime
+
def compute_total_diff(line, base_time):
- """
+ """
Computes the difference in time the line was recorded from the base time.
An example of a line is:
@@ -17,7 +16,7 @@ def compute_total_diff(line, base_time):
line- the line that contains the time the record was taken
base_time- the base time to measure our timestamp from
"""
- date = line.strip().split(":")[2].split("/")
- timestamp = datetime(2014, int(date[0][0:2]), int(date[0][2:4]),
- int(date[1][0:2]), int(date[1][2:4]), int(date[1][4:6]))
- return (timestamp - base_time).total_seconds()
+ date = line.strip().split(':')[2].split('/')
+ timestamp = datetime(2014, int(date[0][0:2]), int(date[0][2:4]),
+ int(date[1][0:2]), int(date[1][2:4]), int(date[1][4:6]))
+ return (timestamp - base_time).total_seconds()
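For reference, a worked sketch of compute_total_diff with an invented log line; the [pid:tid:MMDD/HHMMSS:severity:...] prefix is an assumption about the log format, chosen so that the third colon-separated field carries the MMDD/HHMMSS timestamp the function parses.

    # Hypothetical sketch, not part of this change.
    from datetime import datetime

    line = '[4688:4688:0611/155341:VERBOSE1:metrics.cc(151)] Output Heap Stats:'
    base_time = datetime(2014, 6, 11, 0, 0)
    date = line.strip().split(':')[2].split('/')     # ['0611', '155341']
    timestamp = datetime(2014, int(date[0][0:2]), int(date[0][2:4]),
                         int(date[1][0:2]), int(date[1][2:4]), int(date[1][4:6]))
    print((timestamp - base_time).total_seconds())   # 57221.0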