diff options
-rw-r--r-- | cr2/base.py | 172 | ||||
-rw-r--r-- | cr2/pid_controller.py | 4 | ||||
-rw-r--r-- | cr2/power.py | 6 | ||||
-rw-r--r-- | cr2/thermal.py | 172 | ||||
-rw-r--r-- | tests/test_base.py | 142 | ||||
-rw-r--r-- | tests/test_thermal.py | 130 |
6 files changed, 323 insertions, 303 deletions
diff --git a/cr2/base.py b/cr2/base.py new file mode 100644 index 0000000..112fdea --- /dev/null +++ b/cr2/base.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +"""Base class to parse trace.dat dumps""" + +import os +import re +import pandas as pd + +def trace_parser_explode_array(string, array_lengths): + """Explode an array in the trace into individual elements for easy parsing + + Basically, turn "load={1 1 2 2}" into "load0=1 load1=1 load2=2 + load3=2". array_lengths is a dictionary of array names and their + expected length. If we get array that's shorter than the expected + length, additional keys have to be introduced with value 0 to + compensate. For example, "load={1 2}" with array_lengths being + {"load": 4} returns "load0=1 load1=2 load2=0 load3=0" + + """ + + while True: + match = re.search(r"[^ ]+={[^}]+}", string) + if match is None: + break + + to_explode = match.group() + col_basename = re.match(r"([^=]+)=", to_explode).groups()[0] + vals_str = re.search(r"{(.+)}", to_explode).groups()[0] + vals_array = vals_str.split(' ') + + exploded_str = "" + for (idx, val) in enumerate(vals_array): + exploded_str += "{}{}={} ".format(col_basename, idx, val) + + vals_added = len(vals_array) + if vals_added < array_lengths[col_basename]: + for idx in range(vals_added, array_lengths[col_basename]): + exploded_str += "{}{}=0 ".format(col_basename, idx) + + exploded_str = exploded_str[:-1] + begin_idx = match.start() + end_idx = match.end() + + string = string[:begin_idx] + exploded_str + string[end_idx:] + + return string + +class Base(object): + """Base class to parse trace.dat dumps. + + Don't use directly, create a subclass that defines the unique_word + you want to match in the output""" + def __init__(self, basepath, unique_word): + if basepath is None: + basepath = "." 
+ + self.basepath = basepath + self.data_frame = pd.DataFrame() + self.unique_word = unique_word + + if not os.path.isfile(os.path.join(basepath, "trace.txt")): + self.__run_trace_cmd_report() + + self.__parse_into_dataframe() + + def __run_trace_cmd_report(self): + """Run "trace-cmd report > trace.txt". + + Overwrites the contents of trace.txt if it exists.""" + from subprocess import check_output + + trace_fname = os.path.join(self.basepath, "trace.dat") + if not os.path.isfile(trace_fname): + raise IOError("No such file or directory: {}".format(trace_fname)) + + with open(os.devnull) as devnull: + out = check_output(["trace-cmd", "report", trace_fname], + stderr=devnull) + + with open(os.path.join(self.basepath, "trace.txt"), "w") as fout: + fout.write(out) + + def get_trace_array_lengths(self, fname): + """Calculate the lengths of all arrays in the trace + + Returns a dict with the name of each array found in the trace + as keys and their corresponding length as value + + """ + from collections import defaultdict + + pat_array = re.compile(r"([A-Za-z0-9_]+)={([^}]+)}") + + ret = defaultdict(int) + + with open(fname) as fin: + for line in fin: + if not re.search(self.unique_word, line): + continue + + while True: + match = re.search(pat_array, line) + if not match: + break + + (array_name, array_elements) = match.groups() + + array_len = len(array_elements.split(' ')) + + if array_len > ret[array_name]: + ret[array_name] = array_len + + line = line[match.end():] + + return ret + + def __parse_into_dataframe(self): + """parse the trace and create a pandas DataFrame""" + + fin_fname = os.path.join(self.basepath, "trace.txt") + + array_lengths = self.get_trace_array_lengths(fin_fname) + + pat_timestamp = re.compile(r"([0-9]+\.[0-9]+):") + pat_data_start = re.compile("[A-Za-z0-9_]+=") + pat_empty_array = re.compile(r"[A-Za-z0-9_]+=\{\} ") + + parsed_data = [] + time_array = [] + + with open(fin_fname) as fin: + for line in fin: + if not re.search(self.unique_word, 
line): + continue + + line = line[:-1] + + timestamp_match = re.search(pat_timestamp, line) + timestamp = float(timestamp_match.group(1)) + time_array.append(timestamp) + + data_start_idx = re.search(pat_data_start, line).start() + data_str = line[data_start_idx:] + + # Remove empty arrays from the trace + data_str = re.sub(pat_empty_array, r"", data_str) + + data_str = trace_parser_explode_array(data_str, array_lengths) + + line_data = {} + for field in data_str.split(): + (key, value) = field.split('=') + try: + value = int(value) + except ValueError: + pass + line_data[key] = value + + parsed_data.append(line_data) + + time_idx = pd.Index(time_array, name="Time") + self.data_frame = pd.DataFrame(parsed_data, index=time_idx) + + def write_csv(self, fname): + """Write the data frame to the file fname in csv format""" + self.data_frame.to_csv(fname) + + def normalize_time(self, basetime): + """Subtract basetime from the Time of the data frame""" + if basetime: + self.data_frame.reset_index(inplace=True) + self.data_frame["Time"] = self.data_frame["Time"] - basetime + self.data_frame.set_index("Time", inplace=True) diff --git a/cr2/pid_controller.py b/cr2/pid_controller.py index b23c9b1..2fb2aef 100644 --- a/cr2/pid_controller.py +++ b/cr2/pid_controller.py @@ -2,10 +2,10 @@ """Process the output of the power allocator's PID controller in the current directory's trace.dat""" -from thermal import BaseThermal +from base import Base from plot_utils import normalize_title, pre_plot_setup, post_plot_setup -class PIDController(BaseThermal): +class PIDController(Base): """Process the power allocator PID controller data in a ftrace dump""" def __init__(self, path=None): super(PIDController, self).__init__( diff --git a/cr2/power.py b/cr2/power.py index 4c9413d..3efd524 100644 --- a/cr2/power.py +++ b/cr2/power.py @@ -5,7 +5,7 @@ directory's trace.dat""" from matplotlib import pyplot as plt import pandas as pd -from thermal import BaseThermal +from base import Base from plot_utils import 
normalize_title, pre_plot_setup, post_plot_setup def pivot_with_labels(dfr, data_col_name, new_col_name, mapping_label): @@ -64,7 +64,7 @@ def pivot_with_labels(dfr, data_col_name, new_col_name, mapping_label): return pd.DataFrame(ret_series).fillna(method="pad") -class OutPower(BaseThermal): +class OutPower(Base): """Process the cpufreq cooling power actor data in a ftrace dump""" def __init__(self, path=None): @@ -84,7 +84,7 @@ class OutPower(BaseThermal): return pivot_with_labels(dfr, "freq", "cpus", mapping_label) / 1000 -class InPower(BaseThermal): +class InPower(Base): """Process the cpufreq cooling power actor data in a ftrace dump""" def __init__(self, path=None): diff --git a/cr2/thermal.py b/cr2/thermal.py index 86e8590..7d23f3f 100644 --- a/cr2/thermal.py +++ b/cr2/thermal.py @@ -4,178 +4,12 @@ directory's trace.dat""" import os import re -import pandas as pd from matplotlib import pyplot as plt +from base import Base from plot_utils import normalize_title, pre_plot_setup, post_plot_setup, plot_hist -def trace_parser_explode_array(string, array_lengths): - """Explode an array in the trace into individual elements for easy parsing - - Basically, turn "load={1 1 2 2}" into "load0=1 load1=1 load2=2 - load3=2". array_lengths is a dictionary of array names and their - expected length. If we get array that's shorter than the expected - length, additional keys have to be introduced with value 0 to - compensate. 
For example, "load={1 2}" with array_lengths being - {"load": 4} returns "load0=1 load1=2 load2=0 load3=0" - - """ - - while True: - match = re.search(r"[^ ]+={[^}]+}", string) - if match is None: - break - - to_explode = match.group() - col_basename = re.match(r"([^=]+)=", to_explode).groups()[0] - vals_str = re.search(r"{(.+)}", to_explode).groups()[0] - vals_array = vals_str.split(' ') - - exploded_str = "" - for (idx, val) in enumerate(vals_array): - exploded_str += "{}{}={} ".format(col_basename, idx, val) - - vals_added = len(vals_array) - if vals_added < array_lengths[col_basename]: - for idx in range(vals_added, array_lengths[col_basename]): - exploded_str += "{}{}=0 ".format(col_basename, idx) - - exploded_str = exploded_str[:-1] - begin_idx = match.start() - end_idx = match.end() - - string = string[:begin_idx] + exploded_str + string[end_idx:] - - return string - -class BaseThermal(object): - """Base class to parse trace.dat dumps. - - Don't use directly, create a subclass that defines the unique_word - you want to match in the output""" - def __init__(self, basepath, unique_word): - if basepath is None: - basepath = "." - - self.basepath = basepath - self.data_frame = pd.DataFrame() - self.unique_word = unique_word - - if not os.path.isfile(os.path.join(basepath, "trace.txt")): - self.__run_trace_cmd_report() - - self.__parse_into_dataframe() - - def __run_trace_cmd_report(self): - """Run "trace-cmd report > trace.txt". 
- - Overwrites the contents of trace.txt if it exists.""" - from subprocess import check_output - - trace_fname = os.path.join(self.basepath, "trace.dat") - if not os.path.isfile(trace_fname): - raise IOError("No such file or directory: {}".format(trace_fname)) - - with open(os.devnull) as devnull: - out = check_output(["trace-cmd", "report", trace_fname], - stderr=devnull) - - with open(os.path.join(self.basepath, "trace.txt"), "w") as fout: - fout.write(out) - - def get_trace_array_lengths(self, fname): - """Calculate the lengths of all arrays in the trace - - Returns a dict with the name of each array found in the trace - as keys and their corresponding length as value - - """ - from collections import defaultdict - - pat_array = re.compile(r"([A-Za-z0-9_]+)={([^}]+)}") - - ret = defaultdict(int) - - with open(fname) as fin: - for line in fin: - if not re.search(self.unique_word, line): - continue - - while True: - match = re.search(pat_array, line) - if not match: - break - - (array_name, array_elements) = match.groups() - - array_len = len(array_elements.split(' ')) - - if array_len > ret[array_name]: - ret[array_name] = array_len - - line = line[match.end():] - - return ret - - def __parse_into_dataframe(self): - """parse the trace and create a pandas DataFrame""" - - fin_fname = os.path.join(self.basepath, "trace.txt") - - array_lengths = self.get_trace_array_lengths(fin_fname) - - pat_timestamp = re.compile(r"([0-9]+\.[0-9]+):") - pat_data_start = re.compile("[A-Za-z0-9_]+=") - pat_empty_array = re.compile(r"[A-Za-z0-9_]+=\{\} ") - - parsed_data = [] - time_array = [] - - with open(fin_fname) as fin: - for line in fin: - if not re.search(self.unique_word, line): - continue - - line = line[:-1] - - timestamp_match = re.search(pat_timestamp, line) - timestamp = float(timestamp_match.group(1)) - time_array.append(timestamp) - - data_start_idx = re.search(pat_data_start, line).start() - data_str = line[data_start_idx:] - - # Remove empty arrays from the trace - 
data_str = re.sub(pat_empty_array, r"", data_str) - - data_str = trace_parser_explode_array(data_str, array_lengths) - - line_data = {} - for field in data_str.split(): - (key, value) = field.split('=') - try: - value = int(value) - except ValueError: - pass - line_data[key] = value - - parsed_data.append(line_data) - - time_idx = pd.Index(time_array, name="Time") - self.data_frame = pd.DataFrame(parsed_data, index=time_idx) - - def write_csv(self, fname): - """Write the csv info in thermal.csv""" - self.data_frame.to_csv(fname) - - def normalize_time(self, basetime): - """Substract basetime from the Time of the data frame""" - if basetime: - self.data_frame.reset_index(inplace=True) - self.data_frame["Time"] = self.data_frame["Time"] - basetime - self.data_frame.set_index("Time", inplace=True) - -class Thermal(BaseThermal): +class Thermal(Base): """Process the thermal framework data in a ftrace dump""" def __init__(self, path=None): super(Thermal, self).__init__( @@ -220,7 +54,7 @@ class Thermal(BaseThermal): plot_hist(temps, ax, title, 30, "Temperature", xlim, "default") -class ThermalGovernor(BaseThermal): +class ThermalGovernor(Base): """Process the power allocator data in a ftrace dump""" def __init__(self, path=None): super(ThermalGovernor, self).__init__( diff --git a/tests/test_base.py b/tests/test_base.py new file mode 100644 index 0000000..62fa9d0 --- /dev/null +++ b/tests/test_base.py @@ -0,0 +1,142 @@ +#!/usr/bin/python + +import unittest +import os +import sys + +import utils_tests +import cr2 +from cr2.base import Base +from cr2.base import trace_parser_explode_array + +sys.path.append(os.path.join(utils_tests.TESTS_DIRECTORY, "..", "cr2")) + +class TestBaseMethods(unittest.TestCase): + """Test simple methods that don't need to set up a directory""" + def test_trace_parser_explode_array(self): + """Basic test of trace_parser_explode_array()""" + + line = "cpus=0000000f freq=1400000 raw_cpu_power=189 load={3 2 12 2} power=14" + expected = 
"cpus=0000000f freq=1400000 raw_cpu_power=189 load0=3 load1=2 load2=12 load3=2 power=14" + array_lengths = {"load": 4} + + result = trace_parser_explode_array(line, array_lengths) + self.assertEquals(result, expected) + + def test_trace_parser_explode_array_nop(self): + """trace_parser_explode_array() returns the same string if there's no array in it""" + + line = "cpus=0000000f freq=1400000 raw_cpu_power=189 load0=3 load1=2 load2=12 load3=2 power=14" + array_lengths = {"load": 0} + + result = trace_parser_explode_array(line, array_lengths) + self.assertEquals(result, line) + + def test_trace_parser_explode_array_2(self): + """trace_parser_explode_array() works if there's two arrays in the string""" + + line = "cpus=0000000f freq=1400000 load={3 2 12 2} power=14 req_power={10 7 2 34}" + expected = "cpus=0000000f freq=1400000 load0=3 load1=2 load2=12 load3=2 power=14 req_power0=10 req_power1=7 req_power2=2 req_power3=34" + array_lengths = {'load': 4, 'req_power': 4} + + result = trace_parser_explode_array(line, array_lengths) + self.assertEquals(result, expected) + + def test_trace_parser_explode_array_diff_lengths(self): + """trace_parser_explode_array() expands arrays that are shorter than the expected length + + trace_parser_explode_array() has to be able to deal with an + array of size 2 if we tell it in other parts of the trace it + is four. 
+ + """ + + line = "cpus=0000000f freq=1400000 load={3 2} power=14" + expected = "cpus=0000000f freq=1400000 load0=3 load1=2 load2=0 load3=0 power=14" + array_lengths = {'load': 4} + + result = trace_parser_explode_array(line, array_lengths) + self.assertEquals(result, expected) + +class TestBase(utils_tests.SetupDirectory): + """Incomplete tests for the Base class""" + + def __init__(self, *args, **kwargs): + super(TestBase, self).__init__( + ["trace.txt", "trace_empty.txt"], + *args, + **kwargs) + + def test_get_trace_array_lengths(self): + """TestBase: InPower.get_trace_array_lengths()""" + + in_data = """ kworker/4:1-397 [004] 720.741315: thermal_power_actor_cpu_get_dyn_power: cpus=000000f0 freq=1900000 raw_cpu_power=1259 load={1 2} power=61 + kworker/4:1-397 [004] 720.741349: thermal_power_actor_cpu_get_dyn_power: cpus=0000000f freq=1400000 raw_cpu_power=189 load={1 3 4 89} power=14 + kworker/4:1-397 [004] 720.841315: thermal_power_actor_cpu_get_dyn_power: cpus=000000f0 freq=1900000 raw_cpu_power=1259 load={1 2} power=61 + kworker/4:1-397 [004] 720.841349: thermal_power_actor_cpu_get_dyn_power: cpus=0000000f freq=1400000 raw_cpu_power=189 load={} power=14 +""" + with open("trace.txt", "w") as fout: + fout.write(in_data) + + base = Base(".", "thermal_power_actor_cpu_get_dyn_power") + lengths = base.get_trace_array_lengths("trace.txt") + + self.assertEquals(len(lengths), 1) + self.assertEquals(lengths["load"], 4) + + def test_parse_empty_array(self): + """TestBase: Test that trace that has an empty array creates a valid DataFrame""" + + in_data = """ kworker/4:1-397 [004] 720.741315: thermal_power_actor_cpu_get_dyn_power: cpus=000000f0 freq=1900000 raw_cpu_power=1259 load={} power=61 + kworker/4:1-397 [004] 720.741349: thermal_power_actor_cpu_get_dyn_power: cpus=0000000f freq=1400000 raw_cpu_power=189 load={} power=14""" + expected_columns = set(["cpus", "freq", "raw_cpu_power", "power"]) + + with open("trace.txt", "w") as fout: + fout.write(in_data) + + base = 
Base(".", "thermal_power_actor_cpu_get_dyn_power") + dfr = base.data_frame + + self.assertEquals(set(dfr.columns), expected_columns) + self.assertEquals(dfr["power"].iloc[0], 61) + + def test_get_dataframe(self): + """TestBase: Thermal.data_frame["thermal_zone"] exists and + Thermal.data_frame["temp"][0] = 24000""" + dfr = cr2.Run().thermal.data_frame + + self.assertTrue("thermal_zone" in dfr.columns) + self.assertEquals(dfr["temp"].iloc[0], 24000) + + def test_write_csv(self): + """TestBase: Test that write_csv() creates a valid csv""" + from csv import DictReader + + fname = "thermal.csv" + cr2.Run().thermal.write_csv(fname) + + with open(fname) as fin: + csv_reader = DictReader(fin) + + self.assertTrue("Time" in csv_reader.fieldnames) + self.assertTrue("temp" in csv_reader.fieldnames) + + first_data = csv_reader.next() + self.assertEquals(first_data["Time"], "0.0") + self.assertEquals(first_data["temp"], "24000") + + def test_normalize_time(self): + """TestBase: Test that normalize_time() normalizes the time of the trace""" + thrm = cr2.Run().thermal + + last_prev_time = thrm.data_frame.index[-1] + + basetime = thrm.data_frame.index[0] + thrm.normalize_time(basetime) + + last_time = thrm.data_frame.index[-1] + expected_last_time = last_prev_time - basetime + + self.assertEquals(round(thrm.data_frame.index[0], 7), 0) + self.assertEquals(round(last_time - expected_last_time, 7), 0) + diff --git a/tests/test_thermal.py b/tests/test_thermal.py index 2de2345..ab7f99b 100644 --- a/tests/test_thermal.py +++ b/tests/test_thermal.py @@ -4,63 +4,14 @@ import unittest import matplotlib import os import pandas as pd -import re import shutil import sys import tempfile import utils_tests import cr2 -sys.path.append(os.path.join(utils_tests.TESTS_DIRECTORY, "..", "cr2")) -import thermal - -class TestThermalMethods(unittest.TestCase): - """Test simple methods that don't need to set up a directory""" - def test_trace_parser_explode_array(self): - """Basic test of 
trace_parser_explode_array()""" - - line = "cpus=0000000f freq=1400000 raw_cpu_power=189 load={3 2 12 2} power=14" - expected = "cpus=0000000f freq=1400000 raw_cpu_power=189 load0=3 load1=2 load2=12 load3=2 power=14" - array_lengths = {"load": 4} - - result = thermal.trace_parser_explode_array(line, array_lengths) - self.assertEquals(result, expected) - - def test_trace_parser_explode_array_nop(self): - """trace_parser_explode_array() returns the same string if there's no array in it""" - - line = "cpus=0000000f freq=1400000 raw_cpu_power=189 load0=3 load1=2 load2=12 load3=2 power=14" - array_lengths = {"load": 0} - - result = thermal.trace_parser_explode_array(line, array_lengths) - self.assertEquals(result, line) - - def test_trace_parser_explode_array_2(self): - """trace_parser_explode_array() works if there's two arrays in the string""" - - line = "cpus=0000000f freq=1400000 load={3 2 12 2} power=14 req_power={10 7 2 34}" - expected = "cpus=0000000f freq=1400000 load0=3 load1=2 load2=12 load3=2 power=14 req_power0=10 req_power1=7 req_power2=2 req_power3=34" - array_lengths = {'load': 4, 'req_power': 4} - - result = thermal.trace_parser_explode_array(line, array_lengths) - self.assertEquals(result, expected) - - def test_trace_parser_explode_array_diff_lengths(self): - """trace_parser_explode_array() expands arrays that are shorter than -the expected length - trace_parser_explode_array() has to be able to deal with an - array of size 2 if we tell it in other parts of the trace it - is four. 
- - """ - - line = "cpus=0000000f freq=1400000 load={3 2} power=14" - expected = "cpus=0000000f freq=1400000 load0=3 load1=2 load2=0 load3=0 power=14" - array_lengths = {'load': 4} - - result = thermal.trace_parser_explode_array(line, array_lengths) - self.assertEquals(result, expected) +sys.path.append(os.path.join(utils_tests.TESTS_DIRECTORY, "..", "cr2")) class BaseTestThermal(utils_tests.SetupDirectory): def __init__(self, *args, **kwargs): @@ -69,71 +20,7 @@ class BaseTestThermal(utils_tests.SetupDirectory): *args, **kwargs) -class TestThermalBase(utils_tests.SetupDirectory): - """Incomplete tests for the ThermalBase class""" - - def __init__(self, *args, **kwargs): - super(TestThermalBase, self).__init__( - [], - *args, - **kwargs) - - def test_get_trace_array_lengths(self): - """Test InPower.get_trace_array_lengths()""" - - in_data = """ kworker/4:1-397 [004] 720.741315: thermal_power_actor_cpu_get_dyn_power: cpus=000000f0 freq=1900000 raw_cpu_power=1259 load={1 2} power=61 - kworker/4:1-397 [004] 720.741349: thermal_power_actor_cpu_get_dyn_power: cpus=0000000f freq=1400000 raw_cpu_power=189 load={1 3 4 89} power=14 - kworker/4:1-397 [004] 720.841315: thermal_power_actor_cpu_get_dyn_power: cpus=000000f0 freq=1900000 raw_cpu_power=1259 load={1 2} power=61 - kworker/4:1-397 [004] 720.841349: thermal_power_actor_cpu_get_dyn_power: cpus=0000000f freq=1400000 raw_cpu_power=189 load={} power=14 -""" - with open("trace.txt", "w") as fout: - fout.write(in_data) - - base = thermal.BaseThermal(".", "thermal_power_actor_cpu_get_dyn_power") - lengths = base.get_trace_array_lengths("trace.txt") - - self.assertEquals(len(lengths), 1) - self.assertEquals(lengths["load"], 4) - - def test_parse_empty_array(self): - """Test that trace that has an empty array creates a valid DataFrame""" - - in_data = """ kworker/4:1-397 [004] 720.741315: thermal_power_actor_cpu_get_dyn_power: cpus=000000f0 freq=1900000 raw_cpu_power=1259 load={} power=61 - kworker/4:1-397 [004] 720.741349: 
thermal_power_actor_cpu_get_dyn_power: cpus=0000000f freq=1400000 raw_cpu_power=189 load={} power=14""" - expected_columns = set(["cpus", "freq", "raw_cpu_power", "power"]) - - with open("trace.txt", "w") as fout: - fout.write(in_data) - - base = thermal.BaseThermal(".", "thermal_power_actor_cpu_get_dyn_power") - dfr = base.data_frame - - self.assertEquals(set(dfr.columns), expected_columns) - self.assertEquals(dfr["power"].iloc[0], 61) - class TestThermal(BaseTestThermal): - def test_get_dataframe(self): - dfr = cr2.Run().thermal.data_frame - - self.assertTrue("thermal_zone" in dfr.columns) - self.assertEquals(dfr["temp"].iloc[0], 24000) - - def test_write_csv(self): - """BaseThermal().write_csv() creates a valid csv""" - from csv import DictReader - - fname = "thermal.csv" - cr2.Run().thermal.write_csv(fname) - - with open(fname) as fin: - csv_reader = DictReader(fin) - - self.assertTrue("Time" in csv_reader.fieldnames) - self.assertTrue("temp" in csv_reader.fieldnames) - - first_data = csv_reader.next() - self.assertEquals(first_data["Time"], "0.0") - self.assertEquals(first_data["temp"], "24000") def test_plot_temperature(self): """Test ThermalGovernor.plot_temperature() @@ -167,21 +54,6 @@ class TestThermal(BaseTestThermal): cr2.Run().thermal.plot_temperature_hist(ax, "Foo") matplotlib.pyplot.close('all') - def test_normalize_time(self): - """BaseThermal.normalize_time() normalizes the time of the trace""" - thrm = cr2.Run().thermal - - last_prev_time = thrm.data_frame.index[-1] - - basetime = thrm.data_frame.index[0] - thrm.normalize_time(basetime) - - last_time = thrm.data_frame.index[-1] - expected_last_time = last_prev_time - basetime - - self.assertEquals(round(thrm.data_frame.index[0], 7), 0) - self.assertEquals(round(last_time - expected_last_time, 7), 0) - class TestThermalGovernor(BaseTestThermal): def __init__(self, *args, **kwargs): super(TestThermalGovernor, self).__init__(*args, **kwargs) |