From daed7a4b54bae5fe524e7ce0fd43123a78a39790 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 17 Jun 2015 15:47:11 +0100 Subject: sheye: SchedAssert: Introduction Sheye or Sched Eye is an assertion framework built on top of the statistics framework on top of cr2 and contains scheduler specific assertions for automating scheduler behavioural analysis. This patch Introduces the residency assertion. Change-Id: I7413b9f7f9a13a56f63e9c1b1bf8c467a7306bc2 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 170 ++++++++++++++++++++++++++++++++++++++++++++++++++++ bart/Utils.py | 28 +++++++++ bart/__init__.py | 17 ++++++ 3 files changed, 215 insertions(+) create mode 100755 bart/SchedAssert.py create mode 100644 bart/Utils.py create mode 100644 bart/__init__.py diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py new file mode 100755 index 0000000..08165ec --- /dev/null +++ b/bart/SchedAssert.py @@ -0,0 +1,170 @@ +# $Copyright: +# ---------------------------------------------------------------- +# This confidential and proprietary software may be used only as +# authorised by a licensing agreement from ARM Limited +# (C) COPYRIGHT 2015 ARM Limited +# ALL RIGHTS RESERVED +# The entire notice above must be reproduced on all authorised +# copies and copies may only be made to the extent permitted +# by a licensing agreement from ARM Limited. 
+# ---------------------------------------------------------------- +# File: SchedAssert.py +# ---------------------------------------------------------------- +# $ +# +"""A library for asserting scheduler scenarios based on the +statistics aggregation framework""" + +import cr2 +import itertools +import math +from cr2.plotter.Utils import listify +from cr2.stats.Aggregator import MultiTriggerAggregator +from cr2.stats import SchedConf as sconf +from sheye import Utils + +# pylint: disable=invalid-name +# pylint: disable=too-many-arguments +class SchedAssert(object): + + """The primary focus of this class is to assert and verify + predefined scheduler scenarios. This does not compare parameters + across runs""" + + def __init__(self, run, topology, execname=None, pid=None): + """Args: + run (cr2.Run): A single cr2.Run object + or a path that can be passed to cr2.Run + topology(cr2.stats.Topology): The CPU topology + execname(str, optional): Optional execname of the task + under consideration. + PID(int): The PID of the task to be checked + + One of pid or execname is mandatory. 
If only execname + is specified, The current implementation will fail if + there are more than one processes with the same execname + """ + + run = Utils.init_run(run) + + if not execname and not pid: + raise ValueError("Need to specify at least one of pid or execname") + + self.execname = execname + self._run = run + self._pid = self._validate_pid(pid) + self._aggs = {} + self._topology = topology + self._triggers = sconf.sched_triggers(self._run, self._pid, + cr2.sched.SchedSwitch) + self.name = "{}-{}".format(self.execname, self._pid) + + def _validate_pid(self, pid): + """Validate the passed pid argument""" + + if not pid: + pids = sconf.get_pids_for_process(self._run, + self.execname) + + if len(pids) != 1: + raise RuntimeError( + "There should be exactly one PID {0} for {1}".format( + pids, + self.execname)) + + return pids[0] + + elif self.execname: + + pids = sconf.get_pids_for_process(self._run, + self.execname) + if pid not in pids: + raise RuntimeError( + "PID {0} not mapped to {1}".format( + pid, + self.execname)) + else: + self.execname = sconf.get_task_name(self._run, pid) + + return pid + + def _aggregator(self, aggfunc): + """ + Returns an aggregator corresponding to the + aggfunc, the aggregators are memoized for performance + + Args: + aggfunc (function(pandas.Series)): Function parameter that + accepts a pandas.Series object and returns a vector/scalar result + """ + + if aggfunc not in self._aggs.keys(): + self._aggs[aggfunc] = MultiTriggerAggregator(self._triggers, + self._topology, + aggfunc) + return self._aggs[aggfunc] + + def getResidency(self, level, node, window=None, percent=False): + """ + Residency of the task is the amount of time it spends executing + a particular node of a topological level. For example: + + clusters=[] + big = [1,2] + little = [0,3,4,5] + + topology = Topology(clusters=clusters) + + level="cluster" + node = [1,2] + + Will return the residency of the task on the big cluster. 
If + percent is specified it will be normalized to the total RUNTIME + of the TASK + + Args: + level (hashable): The level to which the node belongs + node (list): The node for which residency needs to calculated + window (tuple): A (start, end) tuple to limit the scope of the + residency calculation. + percent: If true the result is normalized to the total runtime + of the task and returned as a percentage + """ + + # Get the index of the node in the level + node_index = self._topology.get_index(level, node) + + agg = self._aggregator(sconf.residency_sum) + level_result = agg.aggregate(level=level, window=window) + + node_value = level_result[node_index] + + if percent: + total = agg.aggregate(level="all", window=window)[0] + node_value = node_value * 100 + node_value = node_value / total + + return node_value + + def assertResidency( + self, + level, + node, + expected_value, + operator, + window=None, + percent=False): + """ + Args: + level (hashable): The level to which the node belongs + node (list): The node for which residency needs to assert + expected_value (double): The expected value of the residency + operator (function): A binary operator function that returns + a boolean + window (tuple): A (start, end) tuple to limit the scope of the + residency calculation. 
+ percent: If true the result is normalized to the total runtime + of the task and returned as a percentage + """ + node_value = self.getResidency(level, node, window, percent) + return operator(node_value, expected_value) diff --git a/bart/Utils.py b/bart/Utils.py new file mode 100644 index 0000000..1806c87 --- /dev/null +++ b/bart/Utils.py @@ -0,0 +1,28 @@ +# $Copyright: +# ---------------------------------------------------------------- +# This confidential and proprietary software may be used only as +# authorised by a licensing agreement from ARM Limited +# (C) COPYRIGHT 2015 ARM Limited +# ALL RIGHTS RESERVED +# The entire notice above must be reproduced on all authorised +# copies and copies may only be made to the extent permitted +# by a licensing agreement from ARM Limited. +# ---------------------------------------------------------------- +# File: Utils.py +# ---------------------------------------------------------------- +# $ +# +"""Utility functions for sheye""" + +import cr2 + +def init_run(trace): + """Initialize the Run Object""" + + if isinstance(trace, basestring): + return cr2.Run(trace) + + elif isinstance(trace, cr2.Run): + return trace + + raise ValueError("Invalid trace Object") diff --git a/bart/__init__.py b/bart/__init__.py new file mode 100644 index 0000000..7e278ad --- /dev/null +++ b/bart/__init__.py @@ -0,0 +1,17 @@ +# $Copyright: +# ---------------------------------------------------------------- +# This confidential and proprietary software may be used only as +# authorised by a licensing agreement from ARM Limited +# (C) COPYRIGHT 2015 ARM Limited +# ALL RIGHTS RESERVED +# The entire notice above must be reproduced on all authorised +# copies and copies may only be made to the extent permitted +# by a licensing agreement from ARM Limited. 
+# ---------------------------------------------------------------- +# File: __init__.py +# ---------------------------------------------------------------- +# $ +# +"""Initialization for sheye""" + +from sheye import SchedAssert -- cgit v1.2.3 From 8ab36096a963d44e0a7d109a0e89c764d8b9f326 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 18 Jun 2015 00:02:14 +0100 Subject: sheye: SchedAssert: Add getStartTime and getEndTime These functions facilitate calculation of the start and end time of PID relative to the start time of the trace. Change-Id: Iff0174109050153b681c86e2a11a86e929cce7f3 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index 08165ec..2291ef0 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -168,3 +168,24 @@ class SchedAssert(object): """ node_value = self.getResidency(level, node, window, percent) return operator(node_value, expected_value) + + def getStartTime(self): + """ + Returns the first time the task ran + (across all CPUs) + """ + + agg = self._aggregator(sconf.first_time) + result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) + return min(result[0]) + + def getEndTime(self): + """ + Returns the last time the task ran + (across all CPUs) + """ + + agg = self._aggregator(sconf.first_time) + agg = self._aggregator(sconf.last_time) + result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) + return max(result[0]) -- cgit v1.2.3 From 8f5c2c4dac1ee0e6ef0558d2892d942806434d1f Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 17 Jun 2015 16:00:10 +0100 Subject: sheye: SchedAssert: Add assertSwitch assertSwitch facilitates the assertion that a task switches between cpus (or clusters) in a specified time window Change-Id: If15ad2bade473054c25548c615e866a0f5584be9 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 101 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 
file changed, 101 insertions(+) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index 2291ef0..c71ce8a 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -189,3 +189,104 @@ class SchedAssert(object): agg = self._aggregator(sconf.last_time) result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) return max(result[0]) + + def _relax_switch_window(self, series, direction, window): + """ + direction == "left" + return the last time the task was running + if no such time exists in the window, + extend the window's left extent to + getStartTime + + direction == "right" + return the first time the task was running + in the window. If no such time exists in the + window, extend the window's right extent to + getEndTime() + + The function returns a None if + len(series[series == TASK_RUNNING]) == 0 + even in the extended window + """ + + series = series[series == sconf.TASK_RUNNING] + w_series = sconf.select_window(series, window) + start, stop = window + + if direction == "left": + if len(w_series): + return w_series.index.values[-1] + else: + start_time = self.getStartTime() + w_series = sconf.select_window( + series, + window=( + start_time, + start)) + + if not len(w_series): + return None + else: + return w_series.index.values[-1] + + elif direction == "right": + if len(w_series): + return w_series.index.values[0] + else: + end_time = self.getEndTime() + w_series = sconf.select_window(series, window=(stop, end_time)) + + if not len(w_series): + return None + else: + return w_series.index.values[0] + else: + raise ValueError("direction should be either left or right") + + def assertSwitch( + self, + level, + from_node, + to_node, + window, + ignore_multiple=True): + """ + This function asserts that there is context switch from the + from_node to the to_node: + + Args: + level (hashable): The level to which the node belongs + from_node (list): The node from which the task switches out + to_node (list): The node to which the task switches + window 
(tuple): A (start, end) tuple window of time where the + switch needs to be asserted + ignore_multiple (bool): If true, the function will ignore multiple + switches in the window, If false the assert will be true if and + only if there is a single switch within the specified window + + The function will only return true if and only if there is one + context switch between the specified nodes + """ + + from_node_index = self._topology.get_index(level, from_node) + to_node_index = self._topology.get_index(level, to_node) + + agg = self._aggregator(sconf.csum) + level_result = agg.aggregate(level=level) + + from_node_result = level_result[from_node_index] + to_node_result = level_result[to_node_index] + + from_time = self._relax_switch_window(from_node_result, "left", window) + if ignore_multiple: + to_time = self._relax_switch_window(to_node_result, "left", window) + else: + to_time = self._relax_switch_window( + to_node_result, + "right", window) + + if from_time and to_time: + if from_time < to_time: + return True + + return False -- cgit v1.2.3 From 6b09e1e99a1b3cdd949148790d08fbee083510d6 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 17 Jun 2015 16:02:19 +0100 Subject: sheye: SchedAssert: Add assertRuntime assertRuntime facilitates the assertion of the runtime of the task. 
Change-Id: Ie94469ef97dea4930de7180293680bd35d74d741 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index c71ce8a..cab08b3 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -290,3 +290,53 @@ class SchedAssert(object): return True return False + + def getRuntime(self, window=None, percent=False): + """Returns the Total Runtime of a task + + Args: + window (tuple): A (start, end) tuple to limit + the scope of the calculation + percent (boolean): If True, the result is returned + as a percentage of the total execution time + of the run. + """ + + agg = self._aggregator(sconf.residency_sum) + run_time = agg.aggregate(level="all", window=window)[0] + + if percent: + + if window: + begin, end = window + total_time = end - begin + else: + total_time_agg = self._aggregator(sconf.total_duration) + total_time = total_time_agg.aggregate( + level="all")[0] / self._topology.level_span("all") + + run_time = run_time * 100 + run_time = run_time / total_time + + return run_time + + def assertRuntime( + self, + expected_value, + operator, + window=None, + percent=False): + """Assert on the total runtime of the task + + Args: + expected_value (double): The expected value of the total runtime + operator (func(a, b)): A binary operator function that + returns a boolean + window (tuple): A (start, end) tuple to limit the + scope of the calculation + percent (boolean): If True, the result is returned + as a percentage of the total execution time of the run. 
+ """ + + run_time = self.getRuntime(window, percent) + return operator(run_time, expected_value) -- cgit v1.2.3 From b20129b0dc655c59267005d09cfbc6ec8ea66ecc Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 17 Jun 2015 16:05:01 +0100 Subject: sheye: SchedAssert: Add assertDutyCycle assertDutyCycle facilitates the assertion of the duty cycle of the task in a given window of time Change-Id: I1578e265d0f2a2110e26f1092ca71e33ecae7978 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index cab08b3..b62a592 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -340,3 +340,36 @@ class SchedAssert(object): run_time = self.getRuntime(window, percent) return operator(run_time, expected_value) + + def getDutyCycle(self, window): + """Returns the duty cycle of the task + Args: + window (tuple): A (start, end) tuple to limit the + scope of the calculation + + Duty Cycle: + The percentage of time the task spends executing + in the given window + """ + + return self.getRuntime(window, percent=True) + + def assertDutyCycle(self, expected_value, operator, window): + """ + Args: + expected_value (double): The expected value of + the duty cycle + operator (func(a, b)): A binary operator function that + returns a boolean + window (tuple): A (start, end) tuple to limit the + scope of the calculation + + Duty Cycle: + The percentage of time the task spends executing + in the given window + """ + return self.assertRuntime( + expected_value, + operator, + window, + percent=True) -- cgit v1.2.3 From 2db0c3aad1a60a05567350447e58ee524e4184e4 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 18 Jun 2015 00:09:28 +0100 Subject: sheye: SchedAssert: Add assertFirstCpu Facilitate the assertion of the first CPU that the task started on scoped by the trace/provided window. 
Change-Id: I4c42ebbf6d60194b4b2b09041a152addb16b8774 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index b62a592..aeac95b 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -373,3 +373,31 @@ class SchedAssert(object): operator, window, percent=True) + + def getFirstCpu(self, window=None): + """ + Args: + window (tuple): A (start, end) tuple to limit the + scope of the calculation + """ + + agg = self._aggregator(sconf.first_cpu) + result = agg.aggregate(level="cpu", window=window) + result = list(itertools.chain.from_iterable(result)) + + min_time = min(result) + if math.isinf(min_time): + return -1 + index = result.index(min_time) + return self._topology.get_node("cpu", index)[0] + + def assertFirstCpu(self, cpus, window=None): + """ + Args: + cpus (int, list): A list of acceptable CPUs + window (tuple): A (start, end) tuple to limit the scope + of the calculation + """ + first_cpu = self.getFirstCpu(window=window) + cpus = listify(cpus) + return first_cpu in cpus -- cgit v1.2.3 From 55d8954a8db5629df9460d1f7603bfa6511f60f6 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 18 Jun 2015 00:11:29 +0100 Subject: sheye: SchedMultiAssert: Vector Assertions Introduction Multi assert class mirrors the SchedAssert in functionality but allows the user to assert a group of tasks and reduce the vector result to a boolean scalar. 
Change-Id: I0fd10ad6bfa69b7dd1913331718ba687d684ed2e Signed-off-by: Kapileshwar Singh --- bart/SchedMultiAssert.py | 119 +++++++++++++++++++++++++++++++++++++++++++++++ bart/__init__.py | 1 + 2 files changed, 120 insertions(+) create mode 100755 bart/SchedMultiAssert.py diff --git a/bart/SchedMultiAssert.py b/bart/SchedMultiAssert.py new file mode 100755 index 0000000..4e5045d --- /dev/null +++ b/bart/SchedMultiAssert.py @@ -0,0 +1,119 @@ +# $Copyright: +# ---------------------------------------------------------------- +# This confidential and proprietary software may be used only as +# authorised by a licensing agreement from ARM Limited +# (C) COPYRIGHT 2015 ARM Limited +# ALL RIGHTS RESERVED +# The entire notice above must be reproduced on all authorised +# copies and copies may only be made to the extent permitted +# by a licensing agreement from ARM Limited. +# ---------------------------------------------------------------- +# File: SchedMultiAssert.py +# ---------------------------------------------------------------- +# $ +# +"""A library for asserting scheduler scenarios based on the +statistics aggregation framework""" + +import re +import inspect +import cr2 +from cr2.stats import SchedConf as sconf +from cr2.plotter.Utils import listify +from sheye.SchedAssert import SchedAssert +from sheye import Utils + +class SchedMultiAssert(object): + + """The primary focus of this class is to assert and verify + predefined scheduler scenarios. 
This does not compare parameters + across runs""" + + def __init__(self, run, topology, execnames): + """Args: + run (cr2.Run): A single cr2.Run object + or a path that can be passed to cr2.Run + topology(cr2.stats.Topology): The CPU topology + execname(str, list): List of execnames or single task + """ + + self._execnames = listify(execnames) + self._run = Utils.init_run(run) + self._pids = self._populate_pids() + self._topology = topology + self._asserts = self._populate_asserts() + self._populate_methods() + + def _populate_asserts(self): + """Populate SchedAsserts for the PIDs""" + + asserts = {} + + for pid in self._pids: + asserts[pid] = SchedAssert(self._run, self._topology, pid=pid) + + return asserts + + def _populate_pids(self): + """Map the input execnames to PIDs""" + + if len(self._execnames) == 1: + return sconf.get_pids_for_process(self._run, self._execnames[0]) + + pids = [] + + for proc in self._execnames: + pids += sconf.get_pids_for_process(self._run, proc) + + return list(set(pids)) + + def _create_method(self, attr_name): + """A wrapper function to create a dispatch function""" + + return lambda *args, **kwargs: self._dispatch(attr_name, *args, **kwargs) + + def _populate_methods(self): + """Populate Methods from SchedAssert""" + + for attr_name in dir(SchedAssert): + attr = getattr(SchedAssert, attr_name) + + valid_method = attr_name.startswith("get") or \ + attr_name.startswith("assert") + if inspect.ismethod(attr) and valid_method: + func = self._create_method(attr_name) + setattr(self, attr_name, func) + + def get_task_name(self, pid): + """Get task name for the PID""" + return self._asserts[pid].execname + + + def _dispatch(self, func_name, *args, **kwargs): + """The dispatch function to call into the SchedAssert + Method + """ + + assert_func = func_name.startswith("assert") + num_true = 0 + + rank = kwargs.pop("rank", None) + result = kwargs.pop("result", {}) + param = kwargs.pop("param", re.sub(r"assert|get", "", func_name, 
count=1).lower()) + + for pid in self._pids: + + if pid not in result: + result[pid] = {} + result[pid]["task_name"] = self.get_task_name(pid) + + attr = getattr(self._asserts[pid], func_name) + result[pid][param] = attr(*args, **kwargs) + + if assert_func and result[pid][param]: + num_true += 1 + + if assert_func and rank: + return num_true == rank + else: + return result diff --git a/bart/__init__.py b/bart/__init__.py index 7e278ad..2417a3c 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -15,3 +15,4 @@ """Initialization for sheye""" from sheye import SchedAssert +from sheye import SchedMultiAssert -- cgit v1.2.3 From 055fff0ea347ee48964fdeb3a48902dbe5359dfc Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 18 Jun 2015 00:16:28 +0100 Subject: sheye: SchedMatrix: Introduction SchedMatrix provides the ability to correlate a test run with a reference run and assert Similarity between the two based on statistical correlation of task residency waveforms Change-Id: Ia1877980866c59333a9f5823aaf55fdf1e716d1b Signed-off-by: Kapileshwar Singh --- bart/SchedMatrix.py | 209 ++++++++++++++++++++++++++++++++++++++++++++++++++++ bart/__init__.py | 1 + 2 files changed, 210 insertions(+) create mode 100755 bart/SchedMatrix.py diff --git a/bart/SchedMatrix.py b/bart/SchedMatrix.py new file mode 100755 index 0000000..9742ca3 --- /dev/null +++ b/bart/SchedMatrix.py @@ -0,0 +1,209 @@ +# $Copyright: +# ---------------------------------------------------------------- +# This confidential and proprietary software may be used only as +# authorised by a licensing agreement from ARM Limited +# (C) COPYRIGHT 2015 ARM Limited +# ALL RIGHTS RESERVED +# The entire notice above must be reproduced on all authorised +# copies and copies may only be made to the extent permitted +# by a licensing agreement from ARM Limited. 
+# ---------------------------------------------------------------- +# File: SchedMatrix.py +# ---------------------------------------------------------------- +# $ +# +""" +The SchedMatrix provides an ability to compare two executions +of benchmarks with multiple processes. + +For example, consider a benchmark that spawns 4 identical threads +and any two threads should exhibit a certain behaviours and the +remaining another identical but different behaviour. + +SchedMatrix creates a Matrix of Scheduler Waveform Correlations + +A = Reference Execution +B = Execution to be Evaluated + + +---+ +---+ + | | | | +A1, B3 +---+ +--+ +--------------+ + +---+ +---+ + | | | | +A2, B4 +--------------+ +--+ +---+ + +---+ +---+ + | | | | +A3, B1 +---+ +--+ +--------------+ + +---+ +---+ + | | | | +A4, B2 +--------------+ +--+ +---+ + + +Correlation Matrix + + B1 B2 B3 B4 +A1 1 0 1 0 + +A2 0 1 0 1 + +A3 1 0 1 0 + +A4 0 1 0 1 + + +Thus a success criteria can be defined as + +A1 has two similar threads in the +evaluated execution + +assertSiblings(A1, 2, operator.eq) +assertSiblings(A2, 2, operator.eq) +assertSiblings(A3, 2, operator.eq) +assertSiblings(A4, 2, operator.eq) +""" + + +import sys +import cr2 +import numpy as np +from cr2.stats.Aggregator import MultiTriggerAggregator +from cr2.stats.Correlator import Correlator +from cr2.plotter.Utils import listify +from cr2.stats import SchedConf as sconf +from sheye import Utils + +POSITIVE_TOLERANCE = 0.80 + +# pylint: disable=invalid-name +# pylint: disable=too-many-arguments + + +class SchedMatrix(object): + + """Valid cases are: + + * Single execname, multiple PIDs + * PID List + * Multiple execname, one-to-one PID + association + """ + + def __init__( + self, + reference_trace, + trace, + topology, + execnames, + aggfunc=sconf.csum): + + run = Utils.init_run(trace) + reference_run = Utils.init_run(reference_trace) + + self._execnames = listify(execnames) + self._reference_pids = self._populate_pids(reference_run) + self._pids 
= self._populate_pids(run) + self._dimension = len(self._pids) + self._topology = topology + self._matrix = self._generate_matrix(run, reference_run, aggfunc) + + if len(self._pids) != len(self._reference_pids): + raise RuntimeError( + "The runs do not have the same number of PIDs for {0}".format( + str(execnames))) + + def _populate_pids(self, run): + """Populate the qualifying PIDs from the run""" + + if len(self._execnames) == 1: + return sconf.get_pids_for_process(run, self._execnames[0]) + + pids = [] + + for proc in self._execnames: + pids += sconf.get_pids_for_process(run, proc) + + return list(set(pids)) + + def _generate_matrix(self, run, reference_run, aggfunc): + """Generate the Correlation Matrix""" + + reference_aggs = [] + aggs = [] + + for idx in range(self._dimension): + + reference_aggs.append( + MultiTriggerAggregator( + sconf.sched_triggers( + reference_run, + self._reference_pids[idx], + cr2.sched.SchedSwitch + ), + self._topology, + aggfunc)) + + aggs.append( + MultiTriggerAggregator( + sconf.sched_triggers( + run, + self._pids[idx], + cr2.sched.SchedSwitch + ), + self._topology, + aggfunc)) + + agg_pair_gen = ((r_agg, agg) + for r_agg in reference_aggs for agg in aggs) + + # pylint fails to recognize numpy members. + # pylint: disable=no-member + matrix = np.zeros((self._dimension, self._dimension)) + # pylint: enable=no-member + + for (ref_result, test_result) in agg_pair_gen: + i = reference_aggs.index(ref_result) + j = aggs.index(test_result) + corr = Correlator( + ref_result, + test_result, + corrfunc=sconf.binary_correlate, + filter_gaps=True) + _, total = corr.correlate(level="cluster") + + matrix[i][j] = total + + return matrix + + def print_matrix(self): + """Print the correlation matrix""" + + # pylint fails to recognize numpy members. 
+ # pylint: disable=no-member + np.set_printoptions(precision=5) + np.set_printoptions(suppress=False) + np.savetxt(sys.stdout, self._matrix, "%5.5f") + # pylint: enable=no-member + + def getSiblings(self, pid, tolerance=POSITIVE_TOLERANCE): + """Return the number of processes in the + reference trace that have a correlation + greater than tolerance + """ + + ref_pid_idx = self._reference_pids.index(pid) + pid_result = self._matrix[ref_pid_idx] + return len(pid_result[pid_result > tolerance]) + + def assertSiblings(self, pid, expected_value, operator, + tolerance=POSITIVE_TOLERANCE): + """Assert that the number of siblings in the reference + trace match the expected value and the operator + + Args: + pid: The PID in the reference trace + expected_value: the second argument to the operator + operator: a function of the type f(a, b) that returns + a boolean + """ + num_siblings = self.getSiblings(pid, tolerance) + return operator(num_siblings, expected_value) diff --git a/bart/__init__.py b/bart/__init__.py index 2417a3c..5a09d67 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -16,3 +16,4 @@ from sheye import SchedAssert from sheye import SchedMultiAssert +from sheye import SchedMatrix -- cgit v1.2.3 From 074a3ccf9508ef2feea1fa9125c8157eb3d51bc3 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 18 Jun 2015 00:14:49 +0100 Subject: sheye: Add plot function for assert objects Provide the user with the ability to visualize assertion data and process execution residency by using cr2.plotter.EventPlot Change-Id: I199439706b613f8f3af4b637b37ae608bade4483 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 34 ++++++++++++++++++++++++++++++++++ bart/SchedMultiAssert.py | 20 ++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index aeac95b..2b4f74c 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -401,3 +401,37 @@ class SchedAssert(object): first_cpu = 
self.getFirstCpu(window=window) cpus = listify(cpus) return first_cpu in cpus + + def generate_events(self, start_id=0): + """Generate events for the trace plot""" + + agg = self._aggregator(sconf.trace_event) + result = agg.aggregate(level="cpu") + + events = [] + rect_id = start_id + for idx, cpu_events in enumerate(result): + cpu = self._topology.get_node("cpu", idx)[0] + if not cpu_events: + continue + + for event in cpu_events: + event["id"] = rect_id + event["name"] = self.name + event["lane"] = cpu + events.append(event) + rect_id += 1 + + return events + + def plot(self): + """ + Returns: + cr2.plotter.AbstractDataPlotter + Call .view() to draw the graph + """ + level = "cpu" + events = self.generate_events() + names = [self.name] + num_lanes = self._topology.level_span(level) + return cr2.EventPlot(events, names, "CPU: ", num_lanes) diff --git a/bart/SchedMultiAssert.py b/bart/SchedMultiAssert.py index 4e5045d..47ad936 100755 --- a/bart/SchedMultiAssert.py +++ b/bart/SchedMultiAssert.py @@ -117,3 +117,23 @@ class SchedMultiAssert(object): return num_true == rank else: return result + + def generate_events(self): + """Generate Events for the trace plot""" + + events = [] + for s_assert in self._asserts.values(): + events += s_assert.generate_events(start_id=len(events)) + return events + + def plot(self): + """ + Returns: + cr2.plotter.AbstractDataPlotter. 
Call .view() for + displaying the plot + """ + level = "cpu" + events = self.generate_events() + names = [s.name for s in self._asserts.values()] + num_lanes = self._topology.level_span(level) + return cr2.EventPlot(events, names, "CPU: ", num_lanes) -- cgit v1.2.3 From eea13b8dab45ea0e31d63bb5ae8811c5470472f6 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 18 Jun 2015 00:39:58 +0100 Subject: sheye: Allow plotting for all topological levels Change-Id: Ic4f5eb1ce6d1a718cc3aaaf5fb77a4895a4288d0 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 21 ++++++++++----------- bart/SchedMultiAssert.py | 12 ++++++------ 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index 2b4f74c..2f3c133 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -402,36 +402,35 @@ class SchedAssert(object): cpus = listify(cpus) return first_cpu in cpus - def generate_events(self, start_id=0): + def generate_events(self, level, start_id=0): """Generate events for the trace plot""" agg = self._aggregator(sconf.trace_event) - result = agg.aggregate(level="cpu") + result = agg.aggregate(level=level) events = [] rect_id = start_id - for idx, cpu_events in enumerate(result): - cpu = self._topology.get_node("cpu", idx)[0] - if not cpu_events: + for idx, level_events in enumerate(result): + if not level_events: continue - for event in cpu_events: + for event in level_events: event["id"] = rect_id event["name"] = self.name - event["lane"] = cpu + event["lane"] = idx events.append(event) rect_id += 1 return events - def plot(self): + def plot(self, level="cpu"): """ Returns: cr2.plotter.AbstractDataPlotter Call .view() to draw the graph """ - level = "cpu" - events = self.generate_events() + events = self.generate_events(level) names = [self.name] num_lanes = self._topology.level_span(level) - return cr2.EventPlot(events, names, "CPU: ", num_lanes) + lane_prefix = level.upper() + ": " + return cr2.EventPlot(events, 
names, lane_prefix, num_lanes) diff --git a/bart/SchedMultiAssert.py b/bart/SchedMultiAssert.py index 47ad936..821edb4 100755 --- a/bart/SchedMultiAssert.py +++ b/bart/SchedMultiAssert.py @@ -118,22 +118,22 @@ class SchedMultiAssert(object): else: return result - def generate_events(self): + def generate_events(self, level): """Generate Events for the trace plot""" events = [] for s_assert in self._asserts.values(): - events += s_assert.generate_events(start_id=len(events)) + events += s_assert.generate_events(level, start_id=len(events)) return events - def plot(self): + def plot(self, level="cpu"): """ Returns: cr2.plotter.AbstractDataPlotter. Call .view() for displaying the plot """ - level = "cpu" - events = self.generate_events() + events = self.generate_events(level) names = [s.name for s in self._asserts.values()] num_lanes = self._topology.level_span(level) - return cr2.EventPlot(events, names, "CPU: ", num_lanes) + lane_prefix = level.upper() + ": " + return cr2.EventPlot(events, names, lane_prefix, num_lanes) -- cgit v1.2.3 From a9c8716f5daa25a7c9bb2052bc6c02d2bf11b6c9 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 23 Jul 2015 16:48:54 +0100 Subject: teye: Introducing ThermalAssert Change-Id: I79d18fae3fa2735df942f6a81457cdbf86c0c643 Signed-off-by: Kapileshwar Singh --- bart/ThermalAssert.py | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 bart/ThermalAssert.py diff --git a/bart/ThermalAssert.py b/bart/ThermalAssert.py new file mode 100644 index 0000000..2fc124a --- /dev/null +++ b/bart/ThermalAssert.py @@ -0,0 +1,53 @@ +# $Copyright: +# ---------------------------------------------------------------- +# This confidential and proprietary software may be used only as +# authorised by a licensing agreement from ARM Limited +# (C) COPYRIGHT 2015 ARM Limited +# ALL RIGHTS RESERVED +# The entire notice above must be reproduced on all authorised +# copies and copies may only be made to the 
extent permitted +# by a licensing agreement from ARM Limited. +# ---------------------------------------------------------------- +# File: ThermalAssert.py +# ---------------------------------------------------------------- +# $ +# +"""Allow the user to assert various conditions +based on the grammar defined in cr2.stats.grammar. The class is +also intended to have aggregator based functionality. This is not +implemented yet. +""" + +from cr2.stats.grammar import Parser +import warnings +import numpy as np + +# pylint: disable=invalid-name + +class ThermalAssert(object): + + """ + Args: + data (cr2.Run): A cr2.Run instance + config (dict): A dictionary of variables, classes + and functions that can be used in the statements + """ + + def __init__(self, data, config): + self._parser = Parser(data, config) + + def assertStatement(self, statement): + """Solve the statement for a boolean result""" + + result = self.getStatement(statement) + # pylint: disable=no-member + if not (isinstance(result, bool) or isinstance(result, np.bool_)): + warnings.warn( + "solution of {} is not an instance of bool".format(statement)) + return result + # pylint: enable=no-member + + def getStatement(self, statement): + """Evaluate the statement""" + + return self._parser.solve(statement) -- cgit v1.2.3 From 021d6923c33bf79d2e7efaee5ffc76153baaf452 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Tue, 28 Jul 2015 00:16:01 +0100 Subject: sheye: Use cr2.Run -> get_duration Get rid of the usage of total_duration aggregator Change-Id: I9eb9462b0498a5070a06cbf85e0c4776f2ebab5a Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index 2f3c133..887a4f7 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -311,9 +311,7 @@ class SchedAssert(object): begin, end = window total_time = end - begin else: - total_time_agg = self._aggregator(sconf.total_duration) - 
total_time = total_time_agg.aggregate( - level="all")[0] / self._topology.level_span("all") + total_time = self._run.get_duration() run_time = run_time * 100 run_time = run_time / total_time -- cgit v1.2.3 From 0a47937d46da5ecfc67ea35358ab0f5b096d088e Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Tue, 28 Jul 2015 00:16:57 +0100 Subject: sheye: Use new EventPlot data fromat Accommodate changes made to EventPlot Change-Id: Ib2893fe7d8a93bff13930a763eec00ac361b0452 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 21 ++++++++------------- bart/SchedMultiAssert.py | 7 ++++--- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index 887a4f7..8f40e51 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -22,6 +22,7 @@ from cr2.plotter.Utils import listify from cr2.stats.Aggregator import MultiTriggerAggregator from cr2.stats import SchedConf as sconf from sheye import Utils +import numpy as np # pylint: disable=invalid-name # pylint: disable=too-many-arguments @@ -405,21 +406,14 @@ class SchedAssert(object): agg = self._aggregator(sconf.trace_event) result = agg.aggregate(level=level) - events = [] - rect_id = start_id + for idx, level_events in enumerate(result): - if not level_events: + if not len(level_events): continue + events += np.column_stack((level_events, np.full(len(level_events), idx))).tolist() - for event in level_events: - event["id"] = rect_id - event["name"] = self.name - event["lane"] = idx - events.append(event) - rect_id += 1 - - return events + return sorted(events, key = lambda x : x[0]) def plot(self, level="cpu"): """ @@ -427,8 +421,9 @@ class SchedAssert(object): cr2.plotter.AbstractDataPlotter Call .view() to draw the graph """ - events = self.generate_events(level) + events = {} + events[self.name] = self.generate_events(level) names = [self.name] num_lanes = self._topology.level_span(level) lane_prefix = level.upper() + ": " - return cr2.EventPlot(events, names, 
lane_prefix, num_lanes) + return cr2.EventPlot(events, names, lane_prefix, num_lanes, [0, self._run.get_duration()]) diff --git a/bart/SchedMultiAssert.py b/bart/SchedMultiAssert.py index 821edb4..c0238b0 100755 --- a/bart/SchedMultiAssert.py +++ b/bart/SchedMultiAssert.py @@ -121,9 +121,10 @@ class SchedMultiAssert(object): def generate_events(self, level): """Generate Events for the trace plot""" - events = [] + events = {} for s_assert in self._asserts.values(): - events += s_assert.generate_events(level, start_id=len(events)) + events[s_assert.name] = s_assert.generate_events(level) + return events def plot(self, level="cpu"): @@ -136,4 +137,4 @@ class SchedMultiAssert(object): names = [s.name for s in self._asserts.values()] num_lanes = self._topology.level_span(level) lane_prefix = level.upper() + ": " - return cr2.EventPlot(events, names, lane_prefix, num_lanes) + return cr2.EventPlot(events, names, lane_prefix, num_lanes, [0, self._run.get_duration()]) -- cgit v1.2.3 From 83a6edf5cfc1423f1971613736eaae0cea36f107 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 3 Aug 2015 23:13:44 +0100 Subject: sheye: Allow user specified window and xlim of traces Change-Id: I090ac62b68371151e164f545d04b38d413231760 Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 17 ++++++++++++----- bart/SchedMultiAssert.py | 17 ++++++++++++----- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index 8f40e51..7538322 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -401,11 +401,11 @@ class SchedAssert(object): cpus = listify(cpus) return first_cpu in cpus - def generate_events(self, level, start_id=0): + def generate_events(self, level, start_id=0, window=None): """Generate events for the trace plot""" agg = self._aggregator(sconf.trace_event) - result = agg.aggregate(level=level) + result = agg.aggregate(level=level, window=window) events = [] for idx, level_events in enumerate(result): @@ -415,15 
+415,22 @@ class SchedAssert(object): return sorted(events, key = lambda x : x[0]) - def plot(self, level="cpu"): + def plot(self, level="cpu", window=None, xlim=None): """ Returns: cr2.plotter.AbstractDataPlotter Call .view() to draw the graph """ + + if not xlim: + if not window: + xlim = [0, self._run.get_duration()] + else: + xlim = list(window) + events = {} - events[self.name] = self.generate_events(level) + events[self.name] = self.generate_events(level, window) names = [self.name] num_lanes = self._topology.level_span(level) lane_prefix = level.upper() + ": " - return cr2.EventPlot(events, names, lane_prefix, num_lanes, [0, self._run.get_duration()]) + return cr2.EventPlot(events, names, lane_prefix, num_lanes, xlim) diff --git a/bart/SchedMultiAssert.py b/bart/SchedMultiAssert.py index c0238b0..71082b4 100755 --- a/bart/SchedMultiAssert.py +++ b/bart/SchedMultiAssert.py @@ -118,23 +118,30 @@ class SchedMultiAssert(object): else: return result - def generate_events(self, level): + def generate_events(self, level, window=None): """Generate Events for the trace plot""" events = {} for s_assert in self._asserts.values(): - events[s_assert.name] = s_assert.generate_events(level) + events[s_assert.name] = s_assert.generate_events(level, window=window) return events - def plot(self, level="cpu"): + def plot(self, level="cpu", window=None, xlim=None): """ Returns: cr2.plotter.AbstractDataPlotter. 
Call .view() for displaying the plot """ - events = self.generate_events(level) + + if not xlim: + if not window: + xlim = [0, self._run.get_duration()] + else: + xlim = list(window) + + events = self.generate_events(level, window) names = [s.name for s in self._asserts.values()] num_lanes = self._topology.level_span(level) lane_prefix = level.upper() + ": " - return cr2.EventPlot(events, names, lane_prefix, num_lanes, [0, self._run.get_duration()]) + return cr2.EventPlot(events, names, lane_prefix, num_lanes, xlim) -- cgit v1.2.3 From 94d913ce019b7787a20441565e19f5cb51e28ea9 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 10 Aug 2015 14:11:54 +0100 Subject: teye: change copyright to Apache 2 Change-Id: Ic379ba10de6a78534a8bff7a290a8e52bca8b7c6 --- bart/ThermalAssert.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/bart/ThermalAssert.py b/bart/ThermalAssert.py index 2fc124a..65b3bd5 100644 --- a/bart/ThermalAssert.py +++ b/bart/ThermalAssert.py @@ -1,17 +1,18 @@ -# $Copyright: -# ---------------------------------------------------------------- -# This confidential and proprietary software may be used only as -# authorised by a licensing agreement from ARM Limited -# (C) COPYRIGHT 2015 ARM Limited -# ALL RIGHTS RESERVED -# The entire notice above must be reproduced on all authorised -# copies and copies may only be made to the extent permitted -# by a licensing agreement from ARM Limited. -# ---------------------------------------------------------------- -# File: ThermalAssert.py -# ---------------------------------------------------------------- -# $ +# Copyright 2015-2015 ARM Limited # +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + """Allow the user to assert various conditions based on the grammar defined in cr2.stats.grammar. The class is also intended to have aggregator based functionality. This is not -- cgit v1.2.3 From 2c0a6b89b731940010a41cd4ddc0ef2a991d231d Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 10 Aug 2015 14:11:33 +0100 Subject: sheye: change copyright to Apache 2 Change-Id: Ic203a8211b630e5a5777697c1ac867a6bb7c09e5 --- bart/SchedAssert.py | 27 ++++++++++++++------------- bart/SchedMatrix.py | 27 ++++++++++++++------------- bart/SchedMultiAssert.py | 27 ++++++++++++++------------- bart/Utils.py | 27 ++++++++++++++------------- bart/__init__.py | 27 ++++++++++++++------------- 5 files changed, 70 insertions(+), 65 deletions(-) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index 7538322..258bc42 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -1,17 +1,18 @@ -# $Copyright: -# ---------------------------------------------------------------- -# This confidential and proprietary software may be used only as -# authorised by a licensing agreement from ARM Limited -# (C) COPYRIGHT 2015 ARM Limited -# ALL RIGHTS RESERVED -# The entire notice above must be reproduced on all authorised -# copies and copies may only be made to the extent permitted -# by a licensing agreement from ARM Limited. 
-# ---------------------------------------------------------------- -# File: SchedAssert.py -# ---------------------------------------------------------------- -# $ +# Copyright 2015-2015 ARM Limited # +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + """A library for asserting scheduler scenarios based on the statistics aggregation framework""" diff --git a/bart/SchedMatrix.py b/bart/SchedMatrix.py index 9742ca3..3cd29b6 100755 --- a/bart/SchedMatrix.py +++ b/bart/SchedMatrix.py @@ -1,17 +1,18 @@ -# $Copyright: -# ---------------------------------------------------------------- -# This confidential and proprietary software may be used only as -# authorised by a licensing agreement from ARM Limited -# (C) COPYRIGHT 2015 ARM Limited -# ALL RIGHTS RESERVED -# The entire notice above must be reproduced on all authorised -# copies and copies may only be made to the extent permitted -# by a licensing agreement from ARM Limited. -# ---------------------------------------------------------------- -# File: SchedMatrix.py -# ---------------------------------------------------------------- -# $ +# Copyright 2015-2015 ARM Limited # +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + """ The SchedMatrix provides an ability to compare two executions of benchmarks with multiple processes. diff --git a/bart/SchedMultiAssert.py b/bart/SchedMultiAssert.py index 71082b4..04857fe 100755 --- a/bart/SchedMultiAssert.py +++ b/bart/SchedMultiAssert.py @@ -1,17 +1,18 @@ -# $Copyright: -# ---------------------------------------------------------------- -# This confidential and proprietary software may be used only as -# authorised by a licensing agreement from ARM Limited -# (C) COPYRIGHT 2015 ARM Limited -# ALL RIGHTS RESERVED -# The entire notice above must be reproduced on all authorised -# copies and copies may only be made to the extent permitted -# by a licensing agreement from ARM Limited. -# ---------------------------------------------------------------- -# File: SchedMultiAssert.py -# ---------------------------------------------------------------- -# $ +# Copyright 2015-2015 ARM Limited # +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + """A library for asserting scheduler scenarios based on the statistics aggregation framework""" diff --git a/bart/Utils.py b/bart/Utils.py index 1806c87..82fec3b 100644 --- a/bart/Utils.py +++ b/bart/Utils.py @@ -1,17 +1,18 @@ -# $Copyright: -# ---------------------------------------------------------------- -# This confidential and proprietary software may be used only as -# authorised by a licensing agreement from ARM Limited -# (C) COPYRIGHT 2015 ARM Limited -# ALL RIGHTS RESERVED -# The entire notice above must be reproduced on all authorised -# copies and copies may only be made to the extent permitted -# by a licensing agreement from ARM Limited. -# ---------------------------------------------------------------- -# File: Utils.py -# ---------------------------------------------------------------- -# $ +# Copyright 2015-2015 ARM Limited # +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + """Utility functions for sheye""" import cr2 diff --git a/bart/__init__.py b/bart/__init__.py index 5a09d67..fe4c006 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -1,17 +1,18 @@ -# $Copyright: -# ---------------------------------------------------------------- -# This confidential and proprietary software may be used only as -# authorised by a licensing agreement from ARM Limited -# (C) COPYRIGHT 2015 ARM Limited -# ALL RIGHTS RESERVED -# The entire notice above must be reproduced on all authorised -# copies and copies may only be made to the extent permitted -# by a licensing agreement from ARM Limited. -# ---------------------------------------------------------------- -# File: __init__.py -# ---------------------------------------------------------------- -# $ +# Copyright 2015-2015 ARM Limited # +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + """Initialization for sheye""" from sheye import SchedAssert -- cgit v1.2.3 From 6b48a45842b50e4a50e2db2fd8db63b63e6b69c2 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 10 Aug 2015 16:01:40 +0100 Subject: teye: rename cr2 to trappy Change-Id: I1f5dda6dc32e7ba362129f2ec506c5ef7b1bbd4b --- bart/ThermalAssert.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bart/ThermalAssert.py b/bart/ThermalAssert.py index 65b3bd5..44f2790 100644 --- a/bart/ThermalAssert.py +++ b/bart/ThermalAssert.py @@ -14,12 +14,12 @@ # """Allow the user to assert various conditions -based on the grammar defined in cr2.stats.grammar. The class is +based on the grammar defined in trappy.stats.grammar. The class is also intended to have aggregator based functionality. This is not implemented yet. """ -from cr2.stats.grammar import Parser +from trappy.stats.grammar import Parser import warnings import numpy as np @@ -29,7 +29,7 @@ class ThermalAssert(object): """ Args: - data (cr2.Run): A cr2.Run instance + data (trappy.Run): A trappy.Run instance config (dict): A dictionary of variables, classes and functions that can be used in the statements """ -- cgit v1.2.3 From 6d298962c8a0bfd09bd5d0a2c0e2f2e0965716e5 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 10 Aug 2015 16:00:13 +0100 Subject: sheye: rename cr2 to trappy Change-Id: I208ad1de68d9082355de1c7c473323f4e19a78c0 --- bart/SchedAssert.py | 20 ++++++++++---------- bart/SchedMatrix.py | 14 +++++++------- bart/SchedMultiAssert.py | 16 ++++++++-------- bart/Utils.py | 6 +++--- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py index 258bc42..324fb84 100755 --- a/bart/SchedAssert.py +++ b/bart/SchedAssert.py @@ -16,12 +16,12 @@ """A library for asserting scheduler scenarios based on the statistics aggregation framework""" -import cr2 +import trappy import itertools import math -from cr2.plotter.Utils import listify -from cr2.stats.Aggregator import 
MultiTriggerAggregator -from cr2.stats import SchedConf as sconf +from trappy.plotter.Utils import listify +from trappy.stats.Aggregator import MultiTriggerAggregator +from trappy.stats import SchedConf as sconf from sheye import Utils import numpy as np @@ -35,9 +35,9 @@ class SchedAssert(object): def __init__(self, run, topology, execname=None, pid=None): """Args: - run (cr2.Run): A single cr2.Run object - or a path that can be passed to cr2.Run - topology(cr2.stats.Topology): The CPU topology + run (trappy.Run): A single trappy.Run object + or a path that can be passed to trappy.Run + topology(trappy.stats.Topology): The CPU topology execname(str, optional): Optional execname of the task under consideration. PID(int): The PID of the task to be checked @@ -58,7 +58,7 @@ class SchedAssert(object): self._aggs = {} self._topology = topology self._triggers = sconf.sched_triggers(self._run, self._pid, - cr2.sched.SchedSwitch) + trappy.sched.SchedSwitch) self.name = "{}-{}".format(self.execname, self._pid) def _validate_pid(self, pid): @@ -419,7 +419,7 @@ class SchedAssert(object): def plot(self, level="cpu", window=None, xlim=None): """ Returns: - cr2.plotter.AbstractDataPlotter + trappy.plotter.AbstractDataPlotter Call .view() to draw the graph """ @@ -434,4 +434,4 @@ class SchedAssert(object): names = [self.name] num_lanes = self._topology.level_span(level) lane_prefix = level.upper() + ": " - return cr2.EventPlot(events, names, lane_prefix, num_lanes, xlim) + return trappy.EventPlot(events, names, lane_prefix, num_lanes, xlim) diff --git a/bart/SchedMatrix.py b/bart/SchedMatrix.py index 3cd29b6..a6684e0 100755 --- a/bart/SchedMatrix.py +++ b/bart/SchedMatrix.py @@ -65,12 +65,12 @@ assertSiblings(A4, 2, operator.eq) import sys -import cr2 +import trappy import numpy as np -from cr2.stats.Aggregator import MultiTriggerAggregator -from cr2.stats.Correlator import Correlator -from cr2.plotter.Utils import listify -from cr2.stats import SchedConf as sconf +from 
trappy.stats.Aggregator import MultiTriggerAggregator +from trappy.stats.Correlator import Correlator +from trappy.plotter.Utils import listify +from trappy.stats import SchedConf as sconf from sheye import Utils POSITIVE_TOLERANCE = 0.80 @@ -138,7 +138,7 @@ class SchedMatrix(object): sconf.sched_triggers( reference_run, self._reference_pids[idx], - cr2.sched.SchedSwitch + trappy.sched.SchedSwitch ), self._topology, aggfunc)) @@ -148,7 +148,7 @@ class SchedMatrix(object): sconf.sched_triggers( run, self._pids[idx], - cr2.sched.SchedSwitch + trappy.sched.SchedSwitch ), self._topology, aggfunc)) diff --git a/bart/SchedMultiAssert.py b/bart/SchedMultiAssert.py index 04857fe..4d884b1 100755 --- a/bart/SchedMultiAssert.py +++ b/bart/SchedMultiAssert.py @@ -18,9 +18,9 @@ statistics aggregation framework""" import re import inspect -import cr2 -from cr2.stats import SchedConf as sconf -from cr2.plotter.Utils import listify +import trappy +from trappy.stats import SchedConf as sconf +from trappy.plotter.Utils import listify from sheye.SchedAssert import SchedAssert from sheye import Utils @@ -32,9 +32,9 @@ class SchedMultiAssert(object): def __init__(self, run, topology, execnames): """Args: - run (cr2.Run): A single cr2.Run object - or a path that can be passed to cr2.Run - topology(cr2.stats.Topology): The CPU topology + run (trappy.Run): A single trappy.Run object + or a path that can be passed to trappy.Run + topology(trappy.stats.Topology): The CPU topology execname(str, list): List of execnames or single task """ @@ -131,7 +131,7 @@ class SchedMultiAssert(object): def plot(self, level="cpu", window=None, xlim=None): """ Returns: - cr2.plotter.AbstractDataPlotter. Call .view() for + trappy.plotter.AbstractDataPlotter. 
Call .view() for displaying the plot """ @@ -145,4 +145,4 @@ class SchedMultiAssert(object): names = [s.name for s in self._asserts.values()] num_lanes = self._topology.level_span(level) lane_prefix = level.upper() + ": " - return cr2.EventPlot(events, names, lane_prefix, num_lanes, xlim) + return trappy.EventPlot(events, names, lane_prefix, num_lanes, xlim) diff --git a/bart/Utils.py b/bart/Utils.py index 82fec3b..bd49de9 100644 --- a/bart/Utils.py +++ b/bart/Utils.py @@ -15,15 +15,15 @@ """Utility functions for sheye""" -import cr2 +import trappy def init_run(trace): """Initialize the Run Object""" if isinstance(trace, basestring): - return cr2.Run(trace) + return trappy.Run(trace) - elif isinstance(trace, cr2.Run): + elif isinstance(trace, trappy.Run): return trace raise ValueError("Invalid trace Object") -- cgit v1.2.3 From e9f28ed7ce6041b58ff08a6ecf5486a8e8ac95e3 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 13 Aug 2015 18:37:11 +0100 Subject: Add LICENSE --- LICENSE | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -- cgit v1.2.3 From 0329a41aca1a181ef47c98638acce7e331f1f324 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 22:07:37 +0100 Subject: sched: Move Sched Assertions to a separate directory Also change namespace from sheye to bart.sched Signed-off-by: Kapileshwar Singh --- bart/SchedAssert.py | 437 ----------------------------------------- bart/SchedMatrix.py | 210 -------------------- bart/SchedMultiAssert.py | 148 -------------- bart/__init__.py | 20 -- bart/sched/SchedAssert.py | 437 +++++++++++++++++++++++++++++++++++++++++ bart/sched/SchedMatrix.py | 210 ++++++++++++++++++++ bart/sched/SchedMultiAssert.py | 148 ++++++++++++++ bart/sched/__init__.py | 21 ++ 8 files changed, 816 insertions(+), 815 deletions(-) delete mode 100755 bart/SchedAssert.py delete mode 100755 bart/SchedMatrix.py delete mode 100755 bart/SchedMultiAssert.py delete mode 100644 bart/__init__.py create mode 100755 bart/sched/SchedAssert.py create mode 100755 bart/sched/SchedMatrix.py create mode 100755 bart/sched/SchedMultiAssert.py create mode 100644 bart/sched/__init__.py diff --git a/bart/SchedAssert.py b/bart/SchedAssert.py deleted file mode 100755 index 324fb84..0000000 --- a/bart/SchedAssert.py +++ /dev/null @@ -1,437 +0,0 @@ -# Copyright 2015-2015 ARM Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the 
License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -"""A library for asserting scheduler scenarios based on the -statistics aggregation framework""" - -import trappy -import itertools -import math -from trappy.plotter.Utils import listify -from trappy.stats.Aggregator import MultiTriggerAggregator -from trappy.stats import SchedConf as sconf -from sheye import Utils -import numpy as np - -# pylint: disable=invalid-name -# pylint: disable=too-many-arguments -class SchedAssert(object): - - """The primary focus of this class is to assert and verify - predefined scheduler scenarios. This does not compare parameters - across runs""" - - def __init__(self, run, topology, execname=None, pid=None): - """Args: - run (trappy.Run): A single trappy.Run object - or a path that can be passed to trappy.Run - topology(trappy.stats.Topology): The CPU topology - execname(str, optional): Optional execname of the task - under consideration. - PID(int): The PID of the task to be checked - - One of pid or execname is mandatory. 
If only execname - is specified, The current implementation will fail if - there are more than one processes with the same execname - """ - - run = Utils.init_run(run) - - if not execname and not pid: - raise ValueError("Need to specify at least one of pid or execname") - - self.execname = execname - self._run = run - self._pid = self._validate_pid(pid) - self._aggs = {} - self._topology = topology - self._triggers = sconf.sched_triggers(self._run, self._pid, - trappy.sched.SchedSwitch) - self.name = "{}-{}".format(self.execname, self._pid) - - def _validate_pid(self, pid): - """Validate the passed pid argument""" - - if not pid: - pids = sconf.get_pids_for_process(self._run, - self.execname) - - if len(pids) != 1: - raise RuntimeError( - "There should be exactly one PID {0} for {1}".format( - pids, - self.execname)) - - return pids[0] - - elif self.execname: - - pids = sconf.get_pids_for_process(self._run, - self.execname) - if pid not in pids: - raise RuntimeError( - "PID {0} not mapped to {1}".format( - pid, - self.execname)) - else: - self.execname = sconf.get_task_name(self._run, pid) - - return pid - - def _aggregator(self, aggfunc): - """ - Returns an aggregator corresponding to the - aggfunc, the aggregators are memoized for performance - - Args: - aggfunc (function(pandas.Series)): Function parameter that - accepts a pandas.Series object and returns a vector/scalar result - """ - - if aggfunc not in self._aggs.keys(): - self._aggs[aggfunc] = MultiTriggerAggregator(self._triggers, - self._topology, - aggfunc) - return self._aggs[aggfunc] - - def getResidency(self, level, node, window=None, percent=False): - """ - Residency of the task is the amount of time it spends executing - a particular node of a topological level. For example: - - clusters=[] - big = [1,2] - little = [0,3,4,5] - - topology = Topology(clusters=clusters) - - level="cluster" - node = [1,2] - - Will return the residency of the task on the big cluster. 
If - percent is specified it will be normalized to the total RUNTIME - of the TASK - - Args: - level (hashable): The level to which the node belongs - node (list): The node for which residency needs to calculated - window (tuple): A (start, end) tuple to limit the scope of the - residency calculation. - percent: If true the result is normalized to the total runtime - of the task and returned as a percentage - """ - - # Get the index of the node in the level - node_index = self._topology.get_index(level, node) - - agg = self._aggregator(sconf.residency_sum) - level_result = agg.aggregate(level=level, window=window) - - node_value = level_result[node_index] - - if percent: - total = agg.aggregate(level="all", window=window)[0] - node_value = node_value * 100 - node_value = node_value / total - - return node_value - - def assertResidency( - self, - level, - node, - expected_value, - operator, - window=None, - percent=False): - """ - Args: - level (hashable): The level to which the node belongs - node (list): The node for which residency needs to assert - expected_value (double): The expected value of the residency - operator (function): A binary operator function that returns - a boolean - window (tuple): A (start, end) tuple to limit the scope of the - residency calculation. 
- percent: If true the result is normalized to the total runtime - of the task and returned as a percentage - """ - node_value = self.getResidency(level, node, window, percent) - return operator(node_value, expected_value) - - def getStartTime(self): - """ - Returns the first time the task ran - (across all CPUs) - """ - - agg = self._aggregator(sconf.first_time) - result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) - return min(result[0]) - - def getEndTime(self): - """ - Returns the last time the task ran - (across all CPUs) - """ - - agg = self._aggregator(sconf.first_time) - agg = self._aggregator(sconf.last_time) - result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) - return max(result[0]) - - def _relax_switch_window(self, series, direction, window): - """ - direction == "left" - return the last time the task was running - if no such time exists in the window, - extend the window's left extent to - getStartTime - - direction == "right" - return the first time the task was running - in the window. 
If no such time exists in the - window, extend the window's right extent to - getEndTime() - - The function returns a None if - len(series[series == TASK_RUNNING]) == 0 - even in the extended window - """ - - series = series[series == sconf.TASK_RUNNING] - w_series = sconf.select_window(series, window) - start, stop = window - - if direction == "left": - if len(w_series): - return w_series.index.values[-1] - else: - start_time = self.getStartTime() - w_series = sconf.select_window( - series, - window=( - start_time, - start)) - - if not len(w_series): - return None - else: - return w_series.index.values[-1] - - elif direction == "right": - if len(w_series): - return w_series.index.values[0] - else: - end_time = self.getEndTime() - w_series = sconf.select_window(series, window=(stop, end_time)) - - if not len(w_series): - return None - else: - return w_series.index.values[0] - else: - raise ValueError("direction should be either left or right") - - def assertSwitch( - self, - level, - from_node, - to_node, - window, - ignore_multiple=True): - """ - This function asserts that there is context switch from the - from_node to the to_node: - - Args: - level (hashable): The level to which the node belongs - from_node (list): The node from which the task switches out - to_node (list): The node to which the task switches - window (tuple): A (start, end) tuple window of time where the - switch needs to be asserted - ignore_multiple (bool): If true, the function will ignore multiple - switches in the window, If false the assert will be true if and - only if there is a single switch within the specified window - - The function will only return true if and only if there is one - context switch between the specified nodes - """ - - from_node_index = self._topology.get_index(level, from_node) - to_node_index = self._topology.get_index(level, to_node) - - agg = self._aggregator(sconf.csum) - level_result = agg.aggregate(level=level) - - from_node_result = 
level_result[from_node_index] - to_node_result = level_result[to_node_index] - - from_time = self._relax_switch_window(from_node_result, "left", window) - if ignore_multiple: - to_time = self._relax_switch_window(to_node_result, "left", window) - else: - to_time = self._relax_switch_window( - to_node_result, - "right", window) - - if from_time and to_time: - if from_time < to_time: - return True - - return False - - def getRuntime(self, window=None, percent=False): - """Returns the Total Runtime of a task - - Args: - window (tuple): A (start, end) tuple to limit - the scope of the calculation - percent (boolean): If True, the result is returned - as a percentage of the total execution time - of the run. - """ - - agg = self._aggregator(sconf.residency_sum) - run_time = agg.aggregate(level="all", window=window)[0] - - if percent: - - if window: - begin, end = window - total_time = end - begin - else: - total_time = self._run.get_duration() - - run_time = run_time * 100 - run_time = run_time / total_time - - return run_time - - def assertRuntime( - self, - expected_value, - operator, - window=None, - percent=False): - """Assert on the total runtime of the task - - Args: - expected_value (double): The expected value of the total runtime - operator (func(a, b)): A binary operator function that - returns a boolean - window (tuple): A (start, end) tuple to limit the - scope of the calculation - percent (boolean): If True, the result is returned - as a percentage of the total execution time of the run. 
- """ - - run_time = self.getRuntime(window, percent) - return operator(run_time, expected_value) - - def getDutyCycle(self, window): - """Returns the duty cycle of the task - Args: - window (tuple): A (start, end) tuple to limit the - scope of the calculation - - Duty Cycle: - The percentage of time the task spends executing - in the given window - """ - - return self.getRuntime(window, percent=True) - - def assertDutyCycle(self, expected_value, operator, window): - """ - Args: - expected_value (double): The expected value of - the duty cycle - operator (func(a, b)): A binary operator function that - returns a boolean - window (tuple): A (start, end) tuple to limit the - scope of the calculation - - Duty Cycle: - The percentage of time the task spends executing - in the given window - """ - return self.assertRuntime( - expected_value, - operator, - window, - percent=True) - - def getFirstCpu(self, window=None): - """ - Args: - window (tuple): A (start, end) tuple to limit the - scope of the calculation - """ - - agg = self._aggregator(sconf.first_cpu) - result = agg.aggregate(level="cpu", window=window) - result = list(itertools.chain.from_iterable(result)) - - min_time = min(result) - if math.isinf(min_time): - return -1 - index = result.index(min_time) - return self._topology.get_node("cpu", index)[0] - - def assertFirstCpu(self, cpus, window=None): - """ - Args: - cpus (int, list): A list of acceptable CPUs - window (tuple): A (start, end) tuple to limit the scope - of the calculation - """ - first_cpu = self.getFirstCpu(window=window) - cpus = listify(cpus) - return first_cpu in cpus - - def generate_events(self, level, start_id=0, window=None): - """Generate events for the trace plot""" - - agg = self._aggregator(sconf.trace_event) - result = agg.aggregate(level=level, window=window) - events = [] - - for idx, level_events in enumerate(result): - if not len(level_events): - continue - events += np.column_stack((level_events, np.full(len(level_events), 
idx))).tolist() - - return sorted(events, key = lambda x : x[0]) - - def plot(self, level="cpu", window=None, xlim=None): - """ - Returns: - trappy.plotter.AbstractDataPlotter - Call .view() to draw the graph - """ - - if not xlim: - if not window: - xlim = [0, self._run.get_duration()] - else: - xlim = list(window) - - events = {} - events[self.name] = self.generate_events(level, window) - names = [self.name] - num_lanes = self._topology.level_span(level) - lane_prefix = level.upper() + ": " - return trappy.EventPlot(events, names, lane_prefix, num_lanes, xlim) diff --git a/bart/SchedMatrix.py b/bart/SchedMatrix.py deleted file mode 100755 index a6684e0..0000000 --- a/bart/SchedMatrix.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2015-2015 ARM Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -The SchedMatrix provides an ability to compare two executions -of benchmarks with multiple processes. - -For example, consider a benchmark that spawns 4 identical threads -and any two threads should exhibit a certain behaviours and the -remaining another identical but different behaviour. 
- -SchedMatrix creates a Matrix of Scheduler Waveform Correlations - -A = Reference Execution -B = Execution to be Evaluated - - +---+ +---+ - | | | | -A1, B3 +---+ +--+ +--------------+ - +---+ +---+ - | | | | -A2, B4 +--------------+ +--+ +---+ - +---+ +---+ - | | | | -A3, B1 +---+ +--+ +--------------+ - +---+ +---+ - | | | | -A4, B2 +--------------+ +--+ +---+ - - -Correlation Matrix - - B1 B2 B3 B4 -A1 1 0 1 0 - -A2 0 1 0 1 - -A3 1 0 1 0 - -A4 0 1 0 1 - - -Thus a success criteria can be defined as - -A1 has two similar threads in the -evaluated execution - -assertSiblings(A1, 2, operator.eq) -assertSiblings(A2, 2, operator.eq) -assertSiblings(A3, 2, operator.eq) -assertSiblings(A4, 2, operator.eq) -""" - - -import sys -import trappy -import numpy as np -from trappy.stats.Aggregator import MultiTriggerAggregator -from trappy.stats.Correlator import Correlator -from trappy.plotter.Utils import listify -from trappy.stats import SchedConf as sconf -from sheye import Utils - -POSITIVE_TOLERANCE = 0.80 - -# pylint: disable=invalid-name -# pylint: disable=too-many-arguments - - -class SchedMatrix(object): - - """Valid cases are: - - * Single execname, multiple PIDs - * PID List - * Multiple execname, one-to-one PID - association - """ - - def __init__( - self, - reference_trace, - trace, - topology, - execnames, - aggfunc=sconf.csum): - - run = Utils.init_run(trace) - reference_run = Utils.init_run(reference_trace) - - self._execnames = listify(execnames) - self._reference_pids = self._populate_pids(reference_run) - self._pids = self._populate_pids(run) - self._dimension = len(self._pids) - self._topology = topology - self._matrix = self._generate_matrix(run, reference_run, aggfunc) - - if len(self._pids) != len(self._reference_pids): - raise RuntimeError( - "The runs do not have the same number of PIDs for {0}".format( - str(execnames))) - - def _populate_pids(self, run): - """Populate the qualifying PIDs from the run""" - - if len(self._execnames) == 1: - return 
sconf.get_pids_for_process(run, self._execnames[0]) - - pids = [] - - for proc in self._execnames: - pids += sconf.get_pids_for_process(run, proc) - - return list(set(pids)) - - def _generate_matrix(self, run, reference_run, aggfunc): - """Generate the Correlation Matrix""" - - reference_aggs = [] - aggs = [] - - for idx in range(self._dimension): - - reference_aggs.append( - MultiTriggerAggregator( - sconf.sched_triggers( - reference_run, - self._reference_pids[idx], - trappy.sched.SchedSwitch - ), - self._topology, - aggfunc)) - - aggs.append( - MultiTriggerAggregator( - sconf.sched_triggers( - run, - self._pids[idx], - trappy.sched.SchedSwitch - ), - self._topology, - aggfunc)) - - agg_pair_gen = ((r_agg, agg) - for r_agg in reference_aggs for agg in aggs) - - # pylint fails to recognize numpy members. - # pylint: disable=no-member - matrix = np.zeros((self._dimension, self._dimension)) - # pylint: enable=no-member - - for (ref_result, test_result) in agg_pair_gen: - i = reference_aggs.index(ref_result) - j = aggs.index(test_result) - corr = Correlator( - ref_result, - test_result, - corrfunc=sconf.binary_correlate, - filter_gaps=True) - _, total = corr.correlate(level="cluster") - - matrix[i][j] = total - - return matrix - - def print_matrix(self): - """Print the correlation matrix""" - - # pylint fails to recognize numpy members. 
- # pylint: disable=no-member - np.set_printoptions(precision=5) - np.set_printoptions(suppress=False) - np.savetxt(sys.stdout, self._matrix, "%5.5f") - # pylint: enable=no-member - - def getSiblings(self, pid, tolerance=POSITIVE_TOLERANCE): - """Return the number of processes in the - reference trace that have a correlation - greater than tolerance - """ - - ref_pid_idx = self._reference_pids.index(pid) - pid_result = self._matrix[ref_pid_idx] - return len(pid_result[pid_result > tolerance]) - - def assertSiblings(self, pid, expected_value, operator, - tolerance=POSITIVE_TOLERANCE): - """Assert that the number of siblings in the reference - trace match the expected value and the operator - - Args: - pid: The PID in the reference trace - expected_value: the second argument to the operator - operator: a function of the type f(a, b) that returns - a boolean - """ - num_siblings = self.getSiblings(pid, tolerance) - return operator(num_siblings, expected_value) diff --git a/bart/SchedMultiAssert.py b/bart/SchedMultiAssert.py deleted file mode 100755 index 4d884b1..0000000 --- a/bart/SchedMultiAssert.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2015-2015 ARM Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -"""A library for asserting scheduler scenarios based on the -statistics aggregation framework""" - -import re -import inspect -import trappy -from trappy.stats import SchedConf as sconf -from trappy.plotter.Utils import listify -from sheye.SchedAssert import SchedAssert -from sheye import Utils - -class SchedMultiAssert(object): - - """The primary focus of this class is to assert and verify - predefined scheduler scenarios. This does not compare parameters - across runs""" - - def __init__(self, run, topology, execnames): - """Args: - run (trappy.Run): A single trappy.Run object - or a path that can be passed to trappy.Run - topology(trappy.stats.Topology): The CPU topology - execname(str, list): List of execnames or single task - """ - - self._execnames = listify(execnames) - self._run = Utils.init_run(run) - self._pids = self._populate_pids() - self._topology = topology - self._asserts = self._populate_asserts() - self._populate_methods() - - def _populate_asserts(self): - """Populate SchedAsserts for the PIDs""" - - asserts = {} - - for pid in self._pids: - asserts[pid] = SchedAssert(self._run, self._topology, pid=pid) - - return asserts - - def _populate_pids(self): - """Map the input execnames to PIDs""" - - if len(self._execnames) == 1: - return sconf.get_pids_for_process(self._run, self._execnames[0]) - - pids = [] - - for proc in self._execnames: - pids += sconf.get_pids_for_process(self._run, proc) - - return list(set(pids)) - - def _create_method(self, attr_name): - """A wrapper function to create a dispatch function""" - - return lambda *args, **kwargs: self._dispatch(attr_name, *args, **kwargs) - - def _populate_methods(self): - """Populate Methods from SchedAssert""" - - for attr_name in dir(SchedAssert): - attr = getattr(SchedAssert, attr_name) - - valid_method = attr_name.startswith("get") or \ - attr_name.startswith("assert") - if inspect.ismethod(attr) and valid_method: - func = self._create_method(attr_name) - setattr(self, attr_name, func) 
- - def get_task_name(self, pid): - """Get task name for the PID""" - return self._asserts[pid].execname - - - def _dispatch(self, func_name, *args, **kwargs): - """The dispatch function to call into the SchedAssert - Method - """ - - assert_func = func_name.startswith("assert") - num_true = 0 - - rank = kwargs.pop("rank", None) - result = kwargs.pop("result", {}) - param = kwargs.pop("param", re.sub(r"assert|get", "", func_name, count=1).lower()) - - for pid in self._pids: - - if pid not in result: - result[pid] = {} - result[pid]["task_name"] = self.get_task_name(pid) - - attr = getattr(self._asserts[pid], func_name) - result[pid][param] = attr(*args, **kwargs) - - if assert_func and result[pid][param]: - num_true += 1 - - if assert_func and rank: - return num_true == rank - else: - return result - - def generate_events(self, level, window=None): - """Generate Events for the trace plot""" - - events = {} - for s_assert in self._asserts.values(): - events[s_assert.name] = s_assert.generate_events(level, window=window) - - return events - - def plot(self, level="cpu", window=None, xlim=None): - """ - Returns: - trappy.plotter.AbstractDataPlotter. Call .view() for - displaying the plot - """ - - if not xlim: - if not window: - xlim = [0, self._run.get_duration()] - else: - xlim = list(window) - - events = self.generate_events(level, window) - names = [s.name for s in self._asserts.values()] - num_lanes = self._topology.level_span(level) - lane_prefix = level.upper() + ": " - return trappy.EventPlot(events, names, lane_prefix, num_lanes, xlim) diff --git a/bart/__init__.py b/bart/__init__.py deleted file mode 100644 index fe4c006..0000000 --- a/bart/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2015-2015 ARM Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -"""Initialization for sheye""" - -from sheye import SchedAssert -from sheye import SchedMultiAssert -from sheye import SchedMatrix diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py new file mode 100755 index 0000000..a00002e --- /dev/null +++ b/bart/sched/SchedAssert.py @@ -0,0 +1,437 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""A library for asserting scheduler scenarios based on the +statistics aggregation framework""" + +import trappy +import itertools +import math +from trappy.plotter.Utils import listify +from trappy.stats.Aggregator import MultiTriggerAggregator +from trappy.stats import SchedConf as sconf +from bart import Utils +import numpy as np + +# pylint: disable=invalid-name +# pylint: disable=too-many-arguments +class SchedAssert(object): + + """The primary focus of this class is to assert and verify + predefined scheduler scenarios. 
This does not compare parameters + across runs""" + + def __init__(self, run, topology, execname=None, pid=None): + """Args: + run (trappy.Run): A single trappy.Run object + or a path that can be passed to trappy.Run + topology(trappy.stats.Topology): The CPU topology + execname(str, optional): Optional execname of the task + under consideration. + PID(int): The PID of the task to be checked + + One of pid or execname is mandatory. If only execname + is specified, The current implementation will fail if + there are more than one processes with the same execname + """ + + run = Utils.init_run(run) + + if not execname and not pid: + raise ValueError("Need to specify at least one of pid or execname") + + self.execname = execname + self._run = run + self._pid = self._validate_pid(pid) + self._aggs = {} + self._topology = topology + self._triggers = sconf.sched_triggers(self._run, self._pid, + trappy.sched.SchedSwitch) + self.name = "{}-{}".format(self.execname, self._pid) + + def _validate_pid(self, pid): + """Validate the passed pid argument""" + + if not pid: + pids = sconf.get_pids_for_process(self._run, + self.execname) + + if len(pids) != 1: + raise RuntimeError( + "There should be exactly one PID {0} for {1}".format( + pids, + self.execname)) + + return pids[0] + + elif self.execname: + + pids = sconf.get_pids_for_process(self._run, + self.execname) + if pid not in pids: + raise RuntimeError( + "PID {0} not mapped to {1}".format( + pid, + self.execname)) + else: + self.execname = sconf.get_task_name(self._run, pid) + + return pid + + def _aggregator(self, aggfunc): + """ + Returns an aggregator corresponding to the + aggfunc, the aggregators are memoized for performance + + Args: + aggfunc (function(pandas.Series)): Function parameter that + accepts a pandas.Series object and returns a vector/scalar result + """ + + if aggfunc not in self._aggs.keys(): + self._aggs[aggfunc] = MultiTriggerAggregator(self._triggers, + self._topology, + aggfunc) + return 
self._aggs[aggfunc] + + def getResidency(self, level, node, window=None, percent=False): + """ + Residency of the task is the amount of time it spends executing + a particular node of a topological level. For example: + + clusters=[] + big = [1,2] + little = [0,3,4,5] + + topology = Topology(clusters=clusters) + + level="cluster" + node = [1,2] + + Will return the residency of the task on the big cluster. If + percent is specified it will be normalized to the total RUNTIME + of the TASK + + Args: + level (hashable): The level to which the node belongs + node (list): The node for which residency needs to calculated + window (tuple): A (start, end) tuple to limit the scope of the + residency calculation. + percent: If true the result is normalized to the total runtime + of the task and returned as a percentage + """ + + # Get the index of the node in the level + node_index = self._topology.get_index(level, node) + + agg = self._aggregator(sconf.residency_sum) + level_result = agg.aggregate(level=level, window=window) + + node_value = level_result[node_index] + + if percent: + total = agg.aggregate(level="all", window=window)[0] + node_value = node_value * 100 + node_value = node_value / total + + return node_value + + def assertResidency( + self, + level, + node, + expected_value, + operator, + window=None, + percent=False): + """ + Args: + level (hashable): The level to which the node belongs + node (list): The node for which residency needs to assert + expected_value (double): The expected value of the residency + operator (function): A binary operator function that returns + a boolean + window (tuple): A (start, end) tuple to limit the scope of the + residency calculation. 
+ percent: If true the result is normalized to the total runtime + of the task and returned as a percentage + """ + node_value = self.getResidency(level, node, window, percent) + return operator(node_value, expected_value) + + def getStartTime(self): + """ + Returns the first time the task ran + (across all CPUs) + """ + + agg = self._aggregator(sconf.first_time) + result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) + return min(result[0]) + + def getEndTime(self): + """ + Returns the last time the task ran + (across all CPUs) + """ + + agg = self._aggregator(sconf.first_time) + agg = self._aggregator(sconf.last_time) + result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) + return max(result[0]) + + def _relax_switch_window(self, series, direction, window): + """ + direction == "left" + return the last time the task was running + if no such time exists in the window, + extend the window's left extent to + getStartTime + + direction == "right" + return the first time the task was running + in the window. 
If no such time exists in the + window, extend the window's right extent to + getEndTime() + + The function returns a None if + len(series[series == TASK_RUNNING]) == 0 + even in the extended window + """ + + series = series[series == sconf.TASK_RUNNING] + w_series = sconf.select_window(series, window) + start, stop = window + + if direction == "left": + if len(w_series): + return w_series.index.values[-1] + else: + start_time = self.getStartTime() + w_series = sconf.select_window( + series, + window=( + start_time, + start)) + + if not len(w_series): + return None + else: + return w_series.index.values[-1] + + elif direction == "right": + if len(w_series): + return w_series.index.values[0] + else: + end_time = self.getEndTime() + w_series = sconf.select_window(series, window=(stop, end_time)) + + if not len(w_series): + return None + else: + return w_series.index.values[0] + else: + raise ValueError("direction should be either left or right") + + def assertSwitch( + self, + level, + from_node, + to_node, + window, + ignore_multiple=True): + """ + This function asserts that there is context switch from the + from_node to the to_node: + + Args: + level (hashable): The level to which the node belongs + from_node (list): The node from which the task switches out + to_node (list): The node to which the task switches + window (tuple): A (start, end) tuple window of time where the + switch needs to be asserted + ignore_multiple (bool): If true, the function will ignore multiple + switches in the window, If false the assert will be true if and + only if there is a single switch within the specified window + + The function will only return true if and only if there is one + context switch between the specified nodes + """ + + from_node_index = self._topology.get_index(level, from_node) + to_node_index = self._topology.get_index(level, to_node) + + agg = self._aggregator(sconf.csum) + level_result = agg.aggregate(level=level) + + from_node_result = 
level_result[from_node_index] + to_node_result = level_result[to_node_index] + + from_time = self._relax_switch_window(from_node_result, "left", window) + if ignore_multiple: + to_time = self._relax_switch_window(to_node_result, "left", window) + else: + to_time = self._relax_switch_window( + to_node_result, + "right", window) + + if from_time and to_time: + if from_time < to_time: + return True + + return False + + def getRuntime(self, window=None, percent=False): + """Returns the Total Runtime of a task + + Args: + window (tuple): A (start, end) tuple to limit + the scope of the calculation + percent (boolean): If True, the result is returned + as a percentage of the total execution time + of the run. + """ + + agg = self._aggregator(sconf.residency_sum) + run_time = agg.aggregate(level="all", window=window)[0] + + if percent: + + if window: + begin, end = window + total_time = end - begin + else: + total_time = self._run.get_duration() + + run_time = run_time * 100 + run_time = run_time / total_time + + return run_time + + def assertRuntime( + self, + expected_value, + operator, + window=None, + percent=False): + """Assert on the total runtime of the task + + Args: + expected_value (double): The expected value of the total runtime + operator (func(a, b)): A binary operator function that + returns a boolean + window (tuple): A (start, end) tuple to limit the + scope of the calculation + percent (boolean): If True, the result is returned + as a percentage of the total execution time of the run. 
+ """ + + run_time = self.getRuntime(window, percent) + return operator(run_time, expected_value) + + def getDutyCycle(self, window): + """Returns the duty cycle of the task + Args: + window (tuple): A (start, end) tuple to limit the + scope of the calculation + + Duty Cycle: + The percentage of time the task spends executing + in the given window + """ + + return self.getRuntime(window, percent=True) + + def assertDutyCycle(self, expected_value, operator, window): + """ + Args: + expected_value (double): The expected value of + the duty cycle + operator (func(a, b)): A binary operator function that + returns a boolean + window (tuple): A (start, end) tuple to limit the + scope of the calculation + + Duty Cycle: + The percentage of time the task spends executing + in the given window + """ + return self.assertRuntime( + expected_value, + operator, + window, + percent=True) + + def getFirstCpu(self, window=None): + """ + Args: + window (tuple): A (start, end) tuple to limit the + scope of the calculation + """ + + agg = self._aggregator(sconf.first_cpu) + result = agg.aggregate(level="cpu", window=window) + result = list(itertools.chain.from_iterable(result)) + + min_time = min(result) + if math.isinf(min_time): + return -1 + index = result.index(min_time) + return self._topology.get_node("cpu", index)[0] + + def assertFirstCpu(self, cpus, window=None): + """ + Args: + cpus (int, list): A list of acceptable CPUs + window (tuple): A (start, end) tuple to limit the scope + of the calculation + """ + first_cpu = self.getFirstCpu(window=window) + cpus = listify(cpus) + return first_cpu in cpus + + def generate_events(self, level, start_id=0, window=None): + """Generate events for the trace plot""" + + agg = self._aggregator(sconf.trace_event) + result = agg.aggregate(level=level, window=window) + events = [] + + for idx, level_events in enumerate(result): + if not len(level_events): + continue + events += np.column_stack((level_events, np.full(len(level_events), 
idx))).tolist() + + return sorted(events, key = lambda x : x[0]) + + def plot(self, level="cpu", window=None, xlim=None): + """ + Returns: + trappy.plotter.AbstractDataPlotter + Call .view() to draw the graph + """ + + if not xlim: + if not window: + xlim = [0, self._run.get_duration()] + else: + xlim = list(window) + + events = {} + events[self.name] = self.generate_events(level, window) + names = [self.name] + num_lanes = self._topology.level_span(level) + lane_prefix = level.upper() + ": " + return trappy.EventPlot(events, names, lane_prefix, num_lanes, xlim) diff --git a/bart/sched/SchedMatrix.py b/bart/sched/SchedMatrix.py new file mode 100755 index 0000000..f41af14 --- /dev/null +++ b/bart/sched/SchedMatrix.py @@ -0,0 +1,210 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +The SchedMatrix provides an ability to compare two executions +of benchmarks with multiple processes. + +For example, consider a benchmark that spawns 4 identical threads +and any two threads should exhibit a certain behaviours and the +remaining another identical but different behaviour. 
+ +SchedMatrix creates a Matrix of Scheduler Waveform Correlations + +A = Reference Execution +B = Execution to be Evaluated + + +---+ +---+ + | | | | +A1, B3 +---+ +--+ +--------------+ + +---+ +---+ + | | | | +A2, B4 +--------------+ +--+ +---+ + +---+ +---+ + | | | | +A3, B1 +---+ +--+ +--------------+ + +---+ +---+ + | | | | +A4, B2 +--------------+ +--+ +---+ + + +Correlation Matrix + + B1 B2 B3 B4 +A1 1 0 1 0 + +A2 0 1 0 1 + +A3 1 0 1 0 + +A4 0 1 0 1 + + +Thus a success criteria can be defined as + +A1 has two similar threads in the +evaluated execution + +assertSiblings(A1, 2, operator.eq) +assertSiblings(A2, 2, operator.eq) +assertSiblings(A3, 2, operator.eq) +assertSiblings(A4, 2, operator.eq) +""" + + +import sys +import trappy +import numpy as np +from trappy.stats.Aggregator import MultiTriggerAggregator +from trappy.stats.Correlator import Correlator +from trappy.plotter.Utils import listify +from trappy.stats import SchedConf as sconf +from bart import Utils + +POSITIVE_TOLERANCE = 0.80 + +# pylint: disable=invalid-name +# pylint: disable=too-many-arguments + + +class SchedMatrix(object): + + """Valid cases are: + + * Single execname, multiple PIDs + * PID List + * Multiple execname, one-to-one PID + association + """ + + def __init__( + self, + reference_trace, + trace, + topology, + execnames, + aggfunc=sconf.csum): + + run = Utils.init_run(trace) + reference_run = Utils.init_run(reference_trace) + + self._execnames = listify(execnames) + self._reference_pids = self._populate_pids(reference_run) + self._pids = self._populate_pids(run) + self._dimension = len(self._pids) + self._topology = topology + self._matrix = self._generate_matrix(run, reference_run, aggfunc) + + if len(self._pids) != len(self._reference_pids): + raise RuntimeError( + "The runs do not have the same number of PIDs for {0}".format( + str(execnames))) + + def _populate_pids(self, run): + """Populate the qualifying PIDs from the run""" + + if len(self._execnames) == 1: + return 
sconf.get_pids_for_process(run, self._execnames[0]) + + pids = [] + + for proc in self._execnames: + pids += sconf.get_pids_for_process(run, proc) + + return list(set(pids)) + + def _generate_matrix(self, run, reference_run, aggfunc): + """Generate the Correlation Matrix""" + + reference_aggs = [] + aggs = [] + + for idx in range(self._dimension): + + reference_aggs.append( + MultiTriggerAggregator( + sconf.sched_triggers( + reference_run, + self._reference_pids[idx], + trappy.sched.SchedSwitch + ), + self._topology, + aggfunc)) + + aggs.append( + MultiTriggerAggregator( + sconf.sched_triggers( + run, + self._pids[idx], + trappy.sched.SchedSwitch + ), + self._topology, + aggfunc)) + + agg_pair_gen = ((r_agg, agg) + for r_agg in reference_aggs for agg in aggs) + + # pylint fails to recognize numpy members. + # pylint: disable=no-member + matrix = np.zeros((self._dimension, self._dimension)) + # pylint: enable=no-member + + for (ref_result, test_result) in agg_pair_gen: + i = reference_aggs.index(ref_result) + j = aggs.index(test_result) + corr = Correlator( + ref_result, + test_result, + corrfunc=sconf.binary_correlate, + filter_gaps=True) + _, total = corr.correlate(level="cluster") + + matrix[i][j] = total + + return matrix + + def print_matrix(self): + """Print the correlation matrix""" + + # pylint fails to recognize numpy members. 
+ # pylint: disable=no-member + np.set_printoptions(precision=5) + np.set_printoptions(suppress=False) + np.savetxt(sys.stdout, self._matrix, "%5.5f") + # pylint: enable=no-member + + def getSiblings(self, pid, tolerance=POSITIVE_TOLERANCE): + """Return the number of processes in the + reference trace that have a correlation + greater than tolerance + """ + + ref_pid_idx = self._reference_pids.index(pid) + pid_result = self._matrix[ref_pid_idx] + return len(pid_result[pid_result > tolerance]) + + def assertSiblings(self, pid, expected_value, operator, + tolerance=POSITIVE_TOLERANCE): + """Assert that the number of siblings in the reference + trace match the expected value and the operator + + Args: + pid: The PID in the reference trace + expected_value: the second argument to the operator + operator: a function of the type f(a, b) that returns + a boolean + """ + num_siblings = self.getSiblings(pid, tolerance) + return operator(num_siblings, expected_value) diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py new file mode 100755 index 0000000..8e2f334 --- /dev/null +++ b/bart/sched/SchedMultiAssert.py @@ -0,0 +1,148 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""A library for asserting scheduler scenarios based on the +statistics aggregation framework""" + +import re +import inspect +import trappy +from trappy.stats import SchedConf as sconf +from trappy.plotter.Utils import listify +from bart.sched.SchedAssert import SchedAssert +from bart import Utils + +class SchedMultiAssert(object): + + """The primary focus of this class is to assert and verify + predefined scheduler scenarios. This does not compare parameters + across runs""" + + def __init__(self, run, topology, execnames): + """Args: + run (trappy.Run): A single trappy.Run object + or a path that can be passed to trappy.Run + topology(trappy.stats.Topology): The CPU topology + execname(str, list): List of execnames or single task + """ + + self._execnames = listify(execnames) + self._run = Utils.init_run(run) + self._pids = self._populate_pids() + self._topology = topology + self._asserts = self._populate_asserts() + self._populate_methods() + + def _populate_asserts(self): + """Populate SchedAsserts for the PIDs""" + + asserts = {} + + for pid in self._pids: + asserts[pid] = SchedAssert(self._run, self._topology, pid=pid) + + return asserts + + def _populate_pids(self): + """Map the input execnames to PIDs""" + + if len(self._execnames) == 1: + return sconf.get_pids_for_process(self._run, self._execnames[0]) + + pids = [] + + for proc in self._execnames: + pids += sconf.get_pids_for_process(self._run, proc) + + return list(set(pids)) + + def _create_method(self, attr_name): + """A wrapper function to create a dispatch function""" + + return lambda *args, **kwargs: self._dispatch(attr_name, *args, **kwargs) + + def _populate_methods(self): + """Populate Methods from SchedAssert""" + + for attr_name in dir(SchedAssert): + attr = getattr(SchedAssert, attr_name) + + valid_method = attr_name.startswith("get") or \ + attr_name.startswith("assert") + if inspect.ismethod(attr) and valid_method: + func = self._create_method(attr_name) + setattr(self, attr_name, 
func) + + def get_task_name(self, pid): + """Get task name for the PID""" + return self._asserts[pid].execname + + + def _dispatch(self, func_name, *args, **kwargs): + """The dispatch function to call into the SchedAssert + Method + """ + + assert_func = func_name.startswith("assert") + num_true = 0 + + rank = kwargs.pop("rank", None) + result = kwargs.pop("result", {}) + param = kwargs.pop("param", re.sub(r"assert|get", "", func_name, count=1).lower()) + + for pid in self._pids: + + if pid not in result: + result[pid] = {} + result[pid]["task_name"] = self.get_task_name(pid) + + attr = getattr(self._asserts[pid], func_name) + result[pid][param] = attr(*args, **kwargs) + + if assert_func and result[pid][param]: + num_true += 1 + + if assert_func and rank: + return num_true == rank + else: + return result + + def generate_events(self, level, window=None): + """Generate Events for the trace plot""" + + events = {} + for s_assert in self._asserts.values(): + events[s_assert.name] = s_assert.generate_events(level, window=window) + + return events + + def plot(self, level="cpu", window=None, xlim=None): + """ + Returns: + trappy.plotter.AbstractDataPlotter. Call .view() for + displaying the plot + """ + + if not xlim: + if not window: + xlim = [0, self._run.get_duration()] + else: + xlim = list(window) + + events = self.generate_events(level, window) + names = [s.name for s in self._asserts.values()] + num_lanes = self._topology.level_span(level) + lane_prefix = level.upper() + ": " + return trappy.EventPlot(events, names, lane_prefix, num_lanes, xlim) diff --git a/bart/sched/__init__.py b/bart/sched/__init__.py new file mode 100644 index 0000000..c391ecb --- /dev/null +++ b/bart/sched/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Initialization for bart.sched""" + + +from bart.sched import SchedAssert +from bart.sched import SchedMultiAssert +from bart.sched import SchedMatrix -- cgit v1.2.3 From 910189779f1adb15e4a310c8de47558f1cb8b060 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 22:15:14 +0100 Subject: Move utils to common Create a common folder for common libraries Signed-off-by: Kapileshwar Singh --- bart/Utils.py | 29 ----------------------------- bart/common/Utils.py | 29 +++++++++++++++++++++++++++++ bart/common/__init__.py | 19 +++++++++++++++++++ bart/sched/SchedAssert.py | 2 +- bart/sched/SchedMatrix.py | 2 +- bart/sched/SchedMultiAssert.py | 2 +- 6 files changed, 51 insertions(+), 32 deletions(-) delete mode 100644 bart/Utils.py create mode 100644 bart/common/Utils.py create mode 100644 bart/common/__init__.py diff --git a/bart/Utils.py b/bart/Utils.py deleted file mode 100644 index bd49de9..0000000 --- a/bart/Utils.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2015-2015 ARM Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -"""Utility functions for sheye""" - -import trappy - -def init_run(trace): - """Initialize the Run Object""" - - if isinstance(trace, basestring): - return trappy.Run(trace) - - elif isinstance(trace, trappy.Run): - return trace - - raise ValueError("Invalid trace Object") diff --git a/bart/common/Utils.py b/bart/common/Utils.py new file mode 100644 index 0000000..bd49de9 --- /dev/null +++ b/bart/common/Utils.py @@ -0,0 +1,29 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Utility functions for sheye""" + +import trappy + +def init_run(trace): + """Initialize the Run Object""" + + if isinstance(trace, basestring): + return trappy.Run(trace) + + elif isinstance(trace, trappy.Run): + return trace + + raise ValueError("Invalid trace Object") diff --git a/bart/common/__init__.py b/bart/common/__init__.py new file mode 100644 index 0000000..26a260c --- /dev/null +++ b/bart/common/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Initialization for bart.common""" + + +from bart.common import Utils diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index a00002e..32c6d97 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -22,7 +22,7 @@ import math from trappy.plotter.Utils import listify from trappy.stats.Aggregator import MultiTriggerAggregator from trappy.stats import SchedConf as sconf -from bart import Utils +from bart.common import Utils import numpy as np # pylint: disable=invalid-name diff --git a/bart/sched/SchedMatrix.py b/bart/sched/SchedMatrix.py index f41af14..51cb60f 100755 --- a/bart/sched/SchedMatrix.py +++ b/bart/sched/SchedMatrix.py @@ -71,7 +71,7 @@ from trappy.stats.Aggregator import MultiTriggerAggregator from trappy.stats.Correlator import Correlator from trappy.plotter.Utils import listify from trappy.stats import SchedConf as sconf -from bart import Utils +from bart.common import Utils POSITIVE_TOLERANCE = 0.80 diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index 8e2f334..95f625a 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -22,7 +22,7 @@ import trappy from trappy.stats import SchedConf as sconf from trappy.plotter.Utils import listify from bart.sched.SchedAssert import SchedAssert -from bart import Utils +from bart.common import Utils class SchedMultiAssert(object): -- cgit v1.2.3 From 4f3af61dea14c6012f7d97d70958428d926463a9 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 22:16:03 +0100 Subject: Add Module __init__ file Signed-off-by: Kapileshwar Singh --- bart/__init__.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 bart/__init__.py diff --git a/bart/__init__.py b/bart/__init__.py new file mode 100644 index 0000000..6b19202 --- /dev/null +++ b/bart/__init__.py @@ -0,0 +1,19 @@ +# Copyright 
2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Initialization for bart""" + +import bart.sched +import bart.common -- cgit v1.2.3 From 968df8c0a43e33fbc270abd6c4eb92c69994efad Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 22:16:36 +0100 Subject: Add .gitignore Signed-off-by: Kapileshwar Singh --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..69435bb --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +*.pyc +.ipynb_checkpoints -- cgit v1.2.3 From d2ff52058e9113f45032754d312a4f58bb4b34a7 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 22:34:00 +0100 Subject: sched: Add assertPeriod Assertion on the periodicity of a task Signed-off-by: Kapileshwar Singh --- bart/sched/SchedAssert.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index 32c6d97..ea57a6d 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -341,6 +341,42 @@ class SchedAssert(object): run_time = self.getRuntime(window, percent) return operator(run_time, expected_value) + def getPeriod(self, window=None, align="start"): + """Returns average period of the task in (ms) + + Args: + window (tuple): A (start, end) tuple to limit the + scope of the calculation + align: "start" aligns period calculation to switch-in 
events + "end" aligns the calculation to switch-out events + """ + + agg = self._aggregator(sconf.period) + period = agg.aggregate(level="all", window=window)[0] + total, length = map(sum, zip(*period)) + return (total * 1000) / length + + def assertPeriod( + self, + expected_value, + operator, + window=None, + align="start"): + """Assert on the period of the task + + Args: + expected_value (double): The expected value of the total runtime + operator (func(a, b)): A binary operator function that + returns a boolean + window (tuple): A (start, end) tuple to limit the + scope of the calculation + percent (boolean): If True, the result is returned + as a percentage of the total execution time of the run. + """ + + period = self.getPeriod(window, align) + return operator(period, expected_value) + def getDutyCycle(self, window): """Returns the duty cycle of the task Args: -- cgit v1.2.3 From 8a51ec958a6d3477da3b2385c13c26e367d6034f Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 22:51:13 +0100 Subject: common: Move ThermalAssert to a generic Analyzer class Signed-off-by: Kapileshwar Singh --- bart/ThermalAssert.py | 54 ------------------------------------------------ bart/common/Analyzer.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++++ bart/common/__init__.py | 1 + 3 files changed, 56 insertions(+), 54 deletions(-) delete mode 100644 bart/ThermalAssert.py create mode 100644 bart/common/Analyzer.py diff --git a/bart/ThermalAssert.py b/bart/ThermalAssert.py deleted file mode 100644 index 44f2790..0000000 --- a/bart/ThermalAssert.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2015-2015 ARM Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -"""Allow the user to assert various conditions -based on the grammar defined in trappy.stats.grammar. The class is -also intended to have aggregator based functionality. This is not -implemented yet. -""" - -from trappy.stats.grammar import Parser -import warnings -import numpy as np - -# pylint: disable=invalid-name - -class ThermalAssert(object): - - """ - Args: - data (trappy.Run): A trappy.Run instance - config (dict): A dictionary of variables, classes - and functions that can be used in the statements - """ - - def __init__(self, data, config): - self._parser = Parser(data, config) - - def assertStatement(self, statement): - """Solve the statement for a boolean result""" - - result = self.getStatement(statement) - # pylint: disable=no-member - if not (isinstance(result, bool) or isinstance(result, np.bool_)): - warnings.warn( - "solution of {} is not an instance of bool".format(statement)) - return result - # pylint: enable=no-member - - def getStatement(self, statement): - """Evaluate the statement""" - - return self._parser.solve(statement) diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py new file mode 100644 index 0000000..87ef18b --- /dev/null +++ b/bart/common/Analyzer.py @@ -0,0 +1,55 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Allow the user to assert various conditions +based on the grammar defined in trappy.stats.grammar. The class is +also intended to have aggregator based functionality. This is not +implemented yet. +""" + +from trappy.stats.grammar import Parser +import warnings +import numpy as np + +# pylint: disable=invalid-name + + +class Analyzer(object): + + """ + Args: + data (trappy.Run): A trappy.Run instance + config (dict): A dictionary of variables, classes + and functions that can be used in the statements + """ + + def __init__(self, data, config): + self._parser = Parser(data, config) + + def assertStatement(self, statement): + """Solve the statement for a boolean result""" + + result = self.getStatement(statement) + # pylint: disable=no-member + if not (isinstance(result, bool) or isinstance(result, np.bool_)): + warnings.warn( + "solution of {} is not an instance of bool".format(statement)) + return result + # pylint: enable=no-member + + def getStatement(self, statement): + """Evaluate the statement""" + + return self._parser.solve(statement) diff --git a/bart/common/__init__.py b/bart/common/__init__.py index 26a260c..ea66887 100644 --- a/bart/common/__init__.py +++ b/bart/common/__init__.py @@ -17,3 +17,4 @@ from bart.common import Utils +from bart.common import Analyzer -- cgit v1.2.3 From 5d3f86dc90f78e3f302dd9c923e4ebc256f6f9ed Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 22:52:50 +0100 Subject: common: Teach Analyzer to accept Topologies Signed-off-by: Kapileshwar Singh --- bart/common/Analyzer.py | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py index 87ef18b..c4a3ad3 100644 --- a/bart/common/Analyzer.py +++ b/bart/common/Analyzer.py @@ -35,8 +35,8 @@ class Analyzer(object): and functions that can be used in the statements """ - def __init__(self, data, config): - self._parser = Parser(data, config) + def __init__(self, data, config, topology=None): + self._parser = Parser(data, config, topology) def assertStatement(self, statement): """Solve the statement for a boolean result""" -- cgit v1.2.3 From d87e98917a6de153e9e0a79d782b33e1e1465444 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 22:55:10 +0100 Subject: common: Allow getStatement to reference aggregated data optionally Signed-off-by: Kapileshwar Singh --- bart/common/Analyzer.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py index c4a3ad3..60f9617 100644 --- a/bart/common/Analyzer.py +++ b/bart/common/Analyzer.py @@ -38,10 +38,11 @@ class Analyzer(object): def __init__(self, data, config, topology=None): self._parser = Parser(data, config, topology) - def assertStatement(self, statement): + def assertStatement(self, statement, select=None): """Solve the statement for a boolean result""" - result = self.getStatement(statement) + result = self.getStatement(statement, select=select) + # pylint: disable=no-member if not (isinstance(result, bool) or isinstance(result, np.bool_)): warnings.warn( @@ -49,7 +50,19 @@ class Analyzer(object): return result # pylint: enable=no-member - def getStatement(self, statement): + def getStatement(self, statement, reference=False, select=None): """Evaluate the statement""" - return self._parser.solve(statement) + result = self._parser.solve(statement) + + # pylint: disable=no-member + if np.isscalar(result): + return result + # pylint: enable=no-member + + if select is not None and len(result): + result 
= result[select] + if reference: + result = self._parser.ref(result) + + return result -- cgit v1.2.3 From 83910ea5096d0685ecf6753c03af5de2d3ec43e2 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 23:01:38 +0100 Subject: examples: Example for using the analyzer for thermal assertions Signed-off-by: Kapileshwar Singh --- examples/thermal.py | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 examples/thermal.py diff --git a/examples/thermal.py b/examples/thermal.py new file mode 100644 index 0000000..33c2afb --- /dev/null +++ b/examples/thermal.py @@ -0,0 +1,87 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +An example file for usage of Analyzer for thermal assertions +""" +from bart.common.Analyzer import Analyzer +from trappy.stats.Topology import Topology +import unittest +import trappy + + +class TestThermal(unittest.TestCase): + + @classmethod + def setUpClass(cls): + # We can run a workload invocation script here + # Which then copies the required traces for analysis to + # the host. 
+ trace_file = "update_a_trace_path_here" + run = trappy.Run(trace_file, "test_run") + + # Define the parameters that you intend to use in the grammar + config = {} + config["THERMAL"] = trappy.thermal.Thermal + config["OUT"] = trappy.cpu_power.CpuOutPower + config["IN"] = trappy.cpu_power.CpuInPower + config["PID"] = trappy.pid_controller.PIDController + config["GOVERNOR"] = trappy.thermal.ThermalGovernor + config["CONTROL_TEMP"] = 77000 + config["SUSTAINABLE_POWER"] = 2500 + config["EXPECTED_TEMP_QRT"] = 95 + config["EXPECTED_STD_PCT"] = 5 + + # Define a Topology + cls.BIG = '000000f0' + cls.LITTLE = '0000000f' + cls.tz = 0 + cls.analyzer = Analyzer(run, config) + + def test_temperature_quartile(self): + """Assert Temperature quartile""" + + self.assertTrue(self.analyzer.assertStatement( + "numpy.percentile(THERMAL:temp, EXPECTED_TEMP_QRT) < (CONTROL_TEMP + 5000)")) + + def test_average_temperature(self): + """Assert Average temperature""" + + self.assertTrue(self.analyzer.assertStatement( + "numpy.mean(THERMAL:temp) < CONTROL_TEMP", select=self.tz)) + + def test_temp_stdev(self): + """Assert StdDev(temp) as % of mean""" + + self.assertTrue(self.analyzer.assertStatement( + "(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\ + < EXPECTED_STD_PCT", select=self.tz)) + + def test_zero_load_input_power(self): + """Test power demand when load is zero""" + + zero_load_power_big = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \ + & (IN:dynamic_power > 0)", reference=True, select=self.BIG) + self.assertEquals(len(zero_load_power_big), 0) + + zero_load_power_little = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \ + & (IN:dynamic_power > 0)", reference=True, select=self.LITTLE) + self.assertEquals(len(zero_load_power_little), 0) + + def test_sustainable_power(self): + """temp > control_temp, allocated_power < sustainable_power""" + + self.analyzer.getStatement("(GOVERNOR:current_temperature > 
CONTROL_TEMP) &\ + (PID:output > SUSTAINABLE_POWER)", reference=True, select=0) -- cgit v1.2.3 From de3b29f6ac7e54d3dca657ad8529152e42f564ba Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 13 Aug 2015 23:31:33 +0100 Subject: notebooks: Add Example notebook for Thermal The notebook explains the usage of the Analyzer class for asserting Thermal Behaviours. Signed-off-by: Kapileshwar Singh --- notebooks/thermal/Thermal.ipynb | 304 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 304 insertions(+) create mode 100644 notebooks/thermal/Thermal.ipynb diff --git a/notebooks/thermal/Thermal.ipynb b/notebooks/thermal/Thermal.ipynb new file mode 100644 index 0000000..37265b6 --- /dev/null +++ b/notebooks/thermal/Thermal.ipynb @@ -0,0 +1,304 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import trappy \n", + "\n", + "config = {}\n", + "\n", + "# TRAPpy Events\n", + "config[\"THERMAL\"] = trappy.thermal.Thermal\n", + "config[\"OUT\"] = trappy.cpu_power.CpuOutPower\n", + "config[\"IN\"] = trappy.cpu_power.CpuInPower\n", + "config[\"PID\"] = trappy.pid_controller.PIDController\n", + "config[\"GOVERNOR\"] = trappy.thermal.ThermalGovernor\n", + "\n", + "# Control Temperature\n", + "config[\"CONTROL_TEMP\"] = 77000\n", + "\n", + "# A temperature margin of 2.5 degrees Celsius\n", + "config[\"TEMP_MARGIN\"] = 2500\n", + "\n", + "# The Sustainable power at the control Temperature\n", + "config[\"SUSTAINABLE_POWER\"] = 2500\n", + "\n", + "# Expected percentile of CONTROL_TEMP + TEMP_MARGIN\n", + "config[\"EXPECTED_TEMP_QRT\"] = 95\n", + "\n", + "# Maximum expected Standard Deviation as a percentage\n", + "# of mean temperature\n", + "config[\"EXPECTED_STD_PCT\"] = 5\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run Object" + ] + }, + { + 
"cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Create a Trace object\n", + "\n", + "TRACE = \"/Users/kapileshwarsingh/AnalysisRawData/LPC/thermal\"\n", + "run = trappy.Run(TRACE, \"SomeBenchMark\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Assertions" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Create an Assertion Object\n", + "\n", + "from bart.common.Analyzer import Analyzer\n", + "t = Analyzer(run, config)\n", + "\n", + "BIG = '000000f0'\n", + "LITTLE = '0000000f'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Assertion: Load and Dynamic Power" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "This assertion makes sure that the dynamic power for the each cluster is zero when the sum of the \"loads\" of each CPU is 0\n", + "\n", + " $$\\forall\\ t\\ |\\ Load(t) = \\sum\\limits_{i=0}^{cpus} Load_i(t) = 0 \\implies dynamic\\ power(t)=0 $$\n", + " \n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\n", + "PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\n" + ] + } + ], + "source": [ + "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", + " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", + "if len(result):\n", + " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster\"\n", + "else:\n", + " print \"PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\"\n", + "\n", + " \n", + "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", + " & (IN:dynamic_power > 
0)\",reference=True, select=BIG)\n", + "if len(result):\n", + " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster\"\n", + "else:\n", + " print \"PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Assertion: Control Temperature and Sustainable Power" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "When the temperature is greater than the control temperature, the total power granted to all cooling devices should be less than sustainable_power\n", + "\n", + "$$\\forall\\ t\\ |\\ Temperature(t) > control\\_temp \\implies Total\\ Granted\\ Power(t) < sustainable\\_power$$\n", + "\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\n" + ] + } + ], + "source": [ + "result = t.getStatement(\"(GOVERNOR:current_temperature > CONTROL_TEMP) &\\\n", + " (PID:output > SUSTAINABLE_POWER)\", reference=True, select=0)\n", + "\n", + "if len(result):\n", + " print \"FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP\"\n", + "else:\n", + " print \"PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\" " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Statistics" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check if 95% of the temperature readings are below CONTROL_TEMP + MARGIN" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "t.assertStatement(\"numpy.percentile(THERMAL:temp, 95) 
< (CONTROL_TEMP + TEMP_MARGIN)\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check if the mean temperature is less than CONTROL_TEMP" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "t.assertStatement(\"numpy.mean(THERMAL:temp) <= CONTROL_TEMP\", select=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also use getStatement to get the absolute values. Here we are getting the standard deviation expressed as a percentage of the mean" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "2.2390646863103232" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "t.getStatement(\"(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\", select=0)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} -- cgit v1.2.3 From b581b70ed9f5e7bc2d49edcd8d9e8298f5ca28c8 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Fri, 14 Aug 2015 00:58:45 +0100 Subject: notebooks: Add Example notebook for Sched The example notebook shows how the sched assertions can be used for asserting sched deadline behaviours Signed-off-by: Kapileshwar Singh --- notebooks/sched/SchedDeadline.ipynb | 642 ++++++++++++++++++++++++++++++++++++ 1 file changed, 642 insertions(+) create mode 
100644 notebooks/sched/SchedDeadline.ipynb diff --git a/notebooks/sched/SchedDeadline.ipynb b/notebooks/sched/SchedDeadline.ipynb new file mode 100644 index 0000000..0172bb2 --- /dev/null +++ b/notebooks/sched/SchedDeadline.ipynb @@ -0,0 +1,642 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from trappy.stats.Topology import Topology\n", + "from bart.sched.SchedMultiAssert import SchedMultiAssert\n", + "from bart.sched.SchedAssert import SchedAssert\n", + "import trappy\n", + "import os\n", + "import operator\n", + "import json\n", + "\n", + "#Define a CPU Topology (for multi-cluster systems)\n", + "BIG = [1, 2]\n", + "LITTLE = [0, 3, 4, 5]\n", + "CLUSTERS = [BIG, LITTLE]\n", + "topology = Topology(clusters=CLUSTERS)\n", + "\n", + "BASE_PATH = \"/Users/kapileshwarsingh/AnalysisRawData/LPC/sched_deadline/\"\n", + "\n", + "THRESHOLD = 10.0\n", + "def between_threshold(a, b):\n", + " return abs(((a - b) * 100.0) / b) < THRESHOLD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Periodic Yield" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The thread periodic_yeild is woken up at 30ms intervals where it calls sched_yield and relinquishes its time-slice.\n", + "The expectation is that the task will have a duty cycle < 1% and a period of 30ms.\n", + "\n", + "There are two threads, and the rank=1 conveys that the condition is true for one of the threads with the name \"periodic_yeild\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PASS: Period\n", + "{\n", + " \"1844\": {\n", + " \"period\": 1.0085000000401578, \n", + " \"task_name\": \"periodic_yield\"\n", + " }, \n", + " \"1845\": {\n", + " 
\"period\": 29.822017857142669, \n", + " \"task_name\": \"periodic_yield\"\n", + " }\n", + "}\n", + "\n", + "PASS: DutyCycle\n", + "{\n", + " \"1844\": {\n", + " \"task_name\": \"periodic_yield\", \n", + " \"dutycycle\": 0.074749999998857675\n", + " }, \n", + " \"1845\": {\n", + " \"task_name\": \"periodic_yield\", \n", + " \"dutycycle\": 0.03862499999343072\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"yield\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "\n", + "# Assert Period\n", + "s = SchedMultiAssert(run, topology, execnames=\"periodic_yield\")\n", + "if s.assertPeriod(30, between_threshold, rank=1):\n", + " print \"PASS: Period\"\n", + " print json.dumps(s.getPeriod(), indent=3)\n", + "\n", + "print \"\"\n", + " \n", + "# Assert DutyCycle \n", + "if s.assertDutyCycle(1, operator.lt, window=(0,4), rank=2):\n", + " print \"PASS: DutyCycle\"\n", + " print json.dumps(s.getDutyCycle(window=(0,4)), indent=3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CPU Hog" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The reservation of a CPU hogging task is set to 10ms for every 100ms. The assertion ensures a duty cycle of 10%" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PASS: DutyCycle\n", + "{\n", + " \"1852\": {\n", + " \"task_name\": \"cpuhog\", \n", + " \"dutycycle\": 10.050119999991693\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"cpuhog\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "s = SchedMultiAssert(run, topology, execnames=\"cpuhog\")\n", + "s.plot().view()\n", + "\n", + "# Assert DutyCycle\n", + "if s.assertDutyCycle(10, between_threshold, window=(0, 5), rank=1):\n", + " print \"PASS: DutyCycle\"\n", + " print json.dumps(s.getDutyCycle(window=(0, 5)), indent=3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Changing Reservations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A CPU hogging task has reservations set in the increasing order starting from 10% followed by a 2s period of normal execution" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WINDOW -> [0.00, 2.00]\n", + "PASS: Expected=10 Actual=10.38 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [2.00, 4.00]\n", + "PASS: Expected=100 Actual=99.60 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [4.00, 6.00]\n", + "PASS: Expected=20 Actual=21.06 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [6.00, 8.00]\n", + "PASS: Expected=100 Actual=95.69 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [8.00, 10.00]\n", + "PASS: Expected=30 Actual=31.78 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [10.00, 12.00]\n", + "PASS: Expected=100 Actual=98.23 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [12.00, 14.00]\n", + "PASS: Expected=40 Actual=40.74 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [14.00, 16.00]\n", + "PASS: Expected=100 Actual=97.58 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [16.00, 18.00]\n", + "PASS: Expected=50 Actual=52.51 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [18.00, 20.00]\n", + "PASS: Expected=100 Actual=96.38 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [20.00, 22.00]\n", + "PASS: Expected=60 Actual=60.71 THRESHOLD=10.0\n", + "\n" + ] + } + ], + "source": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"cancel_dl_timer\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "s = SchedAssert(run, topology, execname=\"cpuhog\")\n", + "s.plot().view()\n", + "\n", + "NUM_PHASES = 10\n", + "PHASE_DURATION = 2\n", + "start = s.getStartTime()\n", + "DUTY_CYCLE_FACTOR = 10\n", + "\n", + "\n", + "for phase in range(NUM_PHASES + 1):\n", + " window = (start + (phase * PHASE_DURATION),\n", + " start + ((phase + 1) * PHASE_DURATION))\n", + " \n", + " if phase % 2 == 0:\n", + " DUTY_CYCLE = (phase + 2) * DUTY_CYCLE_FACTOR / 2\n", + " else:\n", + " DUTY_CYCLE = 100\n", + "\n", + "\n", + " print \"WINDOW -> [{:.2f}, {:.2f}]\".format(window[0],\n", + " window[1])\n", + " \n", + " \n", + " \n", + " if s.assertDutyCycle(DUTY_CYCLE, between_threshold, 
window=window):\n", + " print \"PASS: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", + " s.getDutyCycle(window=window),\n", + " THRESHOLD)\n", + " else:\n", + " print \"FAIL: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", + " s.getDutyCycle(window=window),\n", + " THRESHOLD)\n", + " \n", + " print \"\"" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} -- cgit v1.2.3 From 90128e97153e9f9d1dfddd1200ffa270313ee69d Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Fri, 14 Aug 2015 22:11:01 +0100 Subject: Add README.md Signed-off-by: Kapileshwar Singh --- README.md | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 0000000..67da41c --- /dev/null +++ b/README.md @@ -0,0 +1,77 @@ +# Introduction + +The Behavioural Analysis and Regression Toolkit is based on [TRAPpy](https://github.com/ARM-software/trappy). The primary goal is to assert behaviours using the FTrace output from the kernel + +## Target Audience +The framework is designed to cater to a wide range of audience. Aiding developers as well as automating +the testing of "difficult to test" behaviours. + +### Kernel Developers + +Making sure that the code that you are writing is doing the right thing. 
+ +### Performance Engineers + +Plotting/Asserting performance behaviours between different revisions of the kernel + +### Quality Assurance/Release Engineers +Verifying behaviours when different components/patches are integrated + +# Installation + +Clone the [BART]( https://github.com/ARM-software/bart) and [TRAPpy]( https://github.com/ARM-software/trappy) repos + + git clone git@github.com:ARM-software/bart.git + git clone git@github.com:ARM-software/trappy.git + +Add the directories to your PYTHONPATH + + export PYTHONPATH=$BASE_DIR/bart:$BASE_DIR/trappy:$PYTHONPATH + +Install dependencies + + apt-get install ipython-notebook python-pandas + +[IPython](http://ipython.org/notebook.html) notebook is a web based interactive python programming interface. +It is required if you plan to use interactive plotting in BART. + +# Trace Analysis Language + +BART also provides a generic Trace Analysis Language, which allows the user to construct complex relation statements on trace data and assert their expected behaviours. The usage of the Analyzer module can be seen for the thermal behaviours [here](https://github.com/sinkap/bart/blob/master/notebooks/thermal/Thermal.ipynb) + +# Scheduler Assertions + +Enables assertion and the calculation of the following parameters: + +### Runtime + +The total time that the task spent on a CPU executing. + +### Switch + +Assert that a task switched between CPUs/Clusters in a given window of time + +### Duty Cycle + +The ratio of the execution time to the total time. + +### Period + +The average difference between two switch-in or two switch-out events of a task + +### First CPU + +The first CPU that a task ran on. + +### Residency + +Calculate and assert the total residency of a task on a CPU or cluster + +### Examples + +The Scheduler assertions also use TRAPpy's EventPlot to provide a kernelshark like timeline +for the tasks under consideration. (in IPython notebooks). 
+ +A notebook explaining the usage of the framework for asserting the deadline scheduler behaviours can be seen [here](https://rawgit.com/sinkap/0abbcc4918eb228b8887/raw/a1b4d6e0079f4ea0368d595d335bc340616501ff/SchedDeadline.html) + + -- cgit v1.2.3 From 75790ddb54398df6c01cf23daa224d30180416f9 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 17 Aug 2015 16:04:03 +0100 Subject: Change reference from forked to main repo Signed-off-by: Kapileshwar Singh --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 67da41c..8ad5f92 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ It is required if you plan to use interactive plotting in BART. # Trace Analysis Language -BART also provides a generic Trace Analysis Language, which allows the user to construct complex relation statements on trace data and assert their expected behaviours. The usage of the Analyzer module can be seen for the thermal behaviours [here](https://github.com/sinkap/bart/blob/master/notebooks/thermal/Thermal.ipynb) +BART also provides a generic Trace Analysis Language, which allows the user to construct complex relation statements on trace data and assert their expected behaviours. 
The usage of the Analyzer module can be seen for the thermal behaviours [here](https://github.com/ARM-software/bart/blob/master/notebooks/thermal/Thermal.ipynb) # Scheduler Assertions -- cgit v1.2.3 From c5a7c3661de1399be2d3f8e6895643dfa03eba78 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 20 Aug 2015 18:21:24 -0700 Subject: common: Add select_window function Add a function that allows selecting a window of time series data Signed-off-by: Kapileshwar Singh --- bart/common/Utils.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index bd49de9..ecb83d3 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -27,3 +27,17 @@ def init_run(trace): return trace raise ValueError("Invalid trace Object") + +def select_window(series, window): + """Library Function to select a portion of + pandas time series + """ + + if not window: + return series + + start, stop = window + ix = series.index + selector = ((ix >= start) & (ix <= stop)) + window_series = series[selector] + return window_series -- cgit v1.2.3 From 51c415b181172e7b4e6654f3a107e0d4793f1df4 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 20 Aug 2015 19:26:08 -0700 Subject: thermal: Introduce ThermalAssert Introducing thermal specific assertions with assertThermalResidency Signed-off-by: Kapileshwar Singh --- bart/__init__.py | 1 + bart/thermal/ThermalAssert.py | 94 +++++++++++++++++++++++++++++++++++++++++++ bart/thermal/__init__.py | 19 +++++++++ 3 files changed, 114 insertions(+) create mode 100644 bart/thermal/ThermalAssert.py create mode 100644 bart/thermal/__init__.py diff --git a/bart/__init__.py b/bart/__init__.py index 6b19202..7dbf066 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -17,3 +17,4 @@ import bart.sched import bart.common +import bart.thermal diff --git a/bart/thermal/ThermalAssert.py b/bart/thermal/ThermalAssert.py new file mode 100644 index 0000000..684405c --- /dev/null +++ 
b/bart/thermal/ThermalAssert.py @@ -0,0 +1,94 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""A thermal specific library to assert certain thermal +behaviours +""" + +from bart.common import Utils +from bart.common.Analyzer import Analyzer +import numpy as np + + +# pylint: disable=invalid-name +# pylint: disable=too-many-arguments
+class ThermalAssert(object): + + """A class that accepts a TRAPpy Run object and + provides assertions for thermal behaviours""" + + def __init__(self, run, config=None): + + self._run = Utils.init_run(run) + self._analyzer = Analyzer(self._run, config) + + def getThermalResidency(self, temp_range, window, percent=False): + """Returns the total time spent in a given temperature range + Args: + temp_range (tuple): A tuple of (low_temp, high_temp) + which specifies the range of temperature that + one intends to calculate the residency for. + window (tuple): A (start, end) tuple to limit the scope of the + residency calculation. 
+ percent: Returns the residency as a percentage of the total + duration of the trace + """ + + # Get a pivoted thermal temperature data using the grammar + data = self._analyzer.getStatement("trappy.thermal.Thermal:temp") + + result = {} + for pivot, data_frame in data.groupby(axis=1, level=0): + + series = data_frame[pivot] + series = Utils.select_window(series, window) + mask = (series >= temp_range[0]) & (series <= temp_range[1]) + index = series.index.values + # pylint fails to recognize numpy members. + # pylint: disable=no-member + shift_index = np.roll(index, 1) + # pylint: enable=no-member + shift_index[0] = 0 + + result[pivot] = sum((index - shift_index)[mask.values]) + + if percent: + result[pivot] = ( + result[pivot] * 100.0) / self._run.get_duration() + + return result + + def assertThermalResidency( + self, + expected_value, + operator, + temp_range, + window, + percent=False): + """ + Args: + expected_value (double): The expected value of the residency + operator (function): A binary operator function that returns + a boolean + temp_range (tuple): A tuple of (low_temp, high_temp) + which specifies the range of temperature that + one intends to calculate the residency for. + window (tuple): A (start, end) tuple to limit the scope of the + residency calculation. + percent: Returns the residency as a percentage of the total + duration of the trace + """ + + residency = self.getThermalResidency(temp_range, window, percent) + return operator(residency, expected_value) diff --git a/bart/thermal/__init__.py b/bart/thermal/__init__.py new file mode 100644 index 0000000..c9baee0 --- /dev/null +++ b/bart/thermal/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Initialization for bart.thermal""" + + +import bart.thermal.ThermalAssert -- cgit v1.2.3 From feeb386efe01fb3dd4e70e216337c8a4b476cb9a Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 Sep 2015 18:01:40 +0100 Subject: setup: Add setup.py Signed-off-by: Kapileshwar Singh --- setup.py | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 setup.py diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..9962e73 --- /dev/null +++ b/setup.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from setuptools import setup, find_packages + + +VERSION = "1.0.0" + +LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general +expectation of the state of the system while targeting a single or set of heuristics. 
+This is particularly helpful when there are large number of factors that can change +the behaviour of the system and testing all permutations of these input parameters +is impossible. In such a scenario an assertion of the final expectation can be +useful in managing performance and regression. + +The Behavioural Analysis and Regression Toolkit is based on TRAPpy. The primary goal is +to assert behaviours using the FTrace output from the kernel +""" + +REQUIRES = [ + "TRAPpy==1.0.0", +] + +setup(name='BART', + version=VERSION, + license="Apache v2", + author="ARM-BART", + author_email="bart@arm.com", + description="Behavioural Analysis and Regression Toolkit", + long_description=LONG_DESCRIPTION, + url="http://arm-software.github.io/bart", + packages=find_packages(), + include_package_data=True, + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Environment :: Web Environment", + "Environment :: Console", + "License :: OSI Approved :: Apache Software License", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python :: 2.7", + # As we depend on trace data from the Linux Kernel/FTrace + "Topic :: System :: Operating System Kernels :: Linux", + "Topic :: Scientific/Engineering :: Visualization" + ], + install_requires=REQUIRES + ) -- cgit v1.2.3 From 95a9c2a9188e1b71a171a7308ebf01e5d1dfdc34 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Fri, 18 Sep 2015 17:23:00 +0100 Subject: Add example_trace_dat* to gitignore These directories will be used to fetch the traces from web for examples Signed-off-by: Kapileshwar Singh --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 69435bb..32e54fb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ *.pyc .ipynb_checkpoints +example_trace_dat* -- cgit v1.2.3 From 44960615001ece61f9354fed9d797af84f99b4b0 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Fri, 18 Sep 2015 17:24:17 +0100 Subject: notebooks: thermal: Add ThermalResidency 
examples Signed-off-by: Kapileshwar Singh --- notebooks/thermal/Thermal.ipynb | 115 +++++++++++++++++++++++++++++++++++----- 1 file changed, 101 insertions(+), 14 deletions(-) diff --git a/notebooks/thermal/Thermal.ipynb b/notebooks/thermal/Thermal.ipynb index 37265b6..f9b65db 100644 --- a/notebooks/thermal/Thermal.ipynb +++ b/notebooks/thermal/Thermal.ipynb @@ -43,6 +43,44 @@ "config[\"EXPECTED_STD_PCT\"] = 5\n" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Get the Trace" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Fetching trace file..\n" + ] + } + ], + "source": [ + "import urllib\n", + "import os\n", + "\n", + "TRACE_DIR = \"example_trace_dat_thermal\"\n", + "TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')\n", + "TRACE_URL = 'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'\n", + "\n", + "if not os.path.isdir(TRACE_DIR):\n", + " os.mkdir(TRACE_DIR)\n", + "\n", + "if not os.path.isfile(TRACE_FILE):\n", + " print \"Fetching trace file..\"\n", + " urllib.urlretrieve(TRACE_URL, filename=TRACE_FILE)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -52,7 +90,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 58, "metadata": { "collapsed": false }, @@ -60,8 +98,7 @@ "source": [ "# Create a Trace object\n", "\n", - "TRACE = \"/Users/kapileshwarsingh/AnalysisRawData/LPC/thermal\"\n", - "run = trappy.Run(TRACE, \"SomeBenchMark\")" + "run = trappy.Run(TRACE_FILE, \"SomeBenchMark\")" ] }, { @@ -73,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 59, "metadata": { "collapsed": false }, @@ -109,7 +146,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 60, "metadata": { "collapsed": false }, @@ -162,7 +199,7 @@ }, { "cell_type": "code", - 
"execution_count": 5, + "execution_count": 61, "metadata": { "collapsed": false }, @@ -201,7 +238,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 62, "metadata": { "collapsed": false }, @@ -212,7 +249,7 @@ "True" ] }, - "execution_count": 6, + "execution_count": 62, "metadata": {}, "output_type": "execute_result" } @@ -230,7 +267,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 63, "metadata": { "collapsed": false }, @@ -241,7 +278,7 @@ "True" ] }, - "execution_count": 7, + "execution_count": 63, "metadata": {}, "output_type": "execute_result" } @@ -259,7 +296,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 64, "metadata": { "collapsed": false }, @@ -267,10 +304,10 @@ { "data": { "text/plain": [ - "2.2390646863103232" + "2.2390646863105119" ] }, - "execution_count": 10, + "execution_count": 64, "metadata": {}, "output_type": "execute_result" } @@ -278,6 +315,56 @@ "source": [ "t.getStatement(\"(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\", select=0)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Thermal Residency" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Thermal Zone: 0 spends 86.58% time in the temperature range [0, 78]\n", + "The 86.58th percentile temperature is 78.28\n" + ] + } + ], + "source": [ + "from bart.thermal.ThermalAssert import ThermalAssert\n", + "\n", + "t_assert = ThermalAssert(run)\n", + "end = run.get_duration()\n", + "\n", + "LOW = 0\n", + "HIGH = 78000\n", + "\n", + "# The thermal residency gives the percentage (or absolute time) spent in the\n", + "# specified temperature range. 
\n", + "\n", + "result = t_assert.getThermalResidency(temp_range=(0, 78000),\n", + " window=(0, end),\n", + " percent=True)\n", + "\n", + "for tz_id in result:\n", + " print \"Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]\".format(tz_id, \n", + " result[tz_id],\n", + " LOW/1000,\n", + " HIGH/1000)\n", + " pct_temp = numpy.percentile(t.getStatement(\"THERMAL:temp\")[tz_id], result[tz_id])\n", + " \n", + " print \"The {:.2f}th percentile temperature is {:.2f}\".format(result[tz_id], pct_temp / 1000.0)\n", + " " + ] } ], "metadata": { @@ -296,7 +383,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", - "version": "2.7.9" + "version": "2.7.6" } }, "nbformat": 4, -- cgit v1.2.3 From 0a86c5e3064b6993f537d0b6b3d17875913e55a8 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 21 Sep 2015 20:32:57 +0100 Subject: notebooks: thermal: downgrade to nbformat v3 Ubuntu 14.04 LTS doesn't have ipython version 3. Downgrade the format of the notebooks so that they can be used with a default Ubuntu installation --- notebooks/thermal/Thermal.ipynb | 746 ++++++++++++++++++++-------------------- 1 file changed, 377 insertions(+), 369 deletions(-) diff --git a/notebooks/thermal/Thermal.ipynb b/notebooks/thermal/Thermal.ipynb index f9b65db..e08ca17 100644 --- a/notebooks/thermal/Thermal.ipynb +++ b/notebooks/thermal/Thermal.ipynb @@ -1,391 +1,399 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Configuration" - ] + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": false + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 }, - "outputs": [], - "source": [ - "import trappy \n", - "\n", - "config = {}\n", - "\n", - "# TRAPpy Events\n", - "config[\"THERMAL\"] = trappy.thermal.Thermal\n", - "config[\"OUT\"] = 
trappy.cpu_power.CpuOutPower\n", - "config[\"IN\"] = trappy.cpu_power.CpuInPower\n", - "config[\"PID\"] = trappy.pid_controller.PIDController\n", - "config[\"GOVERNOR\"] = trappy.thermal.ThermalGovernor\n", - "\n", - "# Control Temperature\n", - "config[\"CONTROL_TEMP\"] = 77000\n", - "\n", - "# A temperature margin of 2.5 degrees Celsius\n", - "config[\"TEMP_MARGIN\"] = 2500\n", - "\n", - "# The Sustainable power at the control Temperature\n", - "config[\"SUSTAINABLE_POWER\"] = 2500\n", - "\n", - "# Expected percentile of CONTROL_TEMP + TEMP_MARGIN\n", - "config[\"EXPECTED_TEMP_QRT\"] = 95\n", - "\n", - "# Maximum expected Standard Deviation as a percentage\n", - "# of mean temperature\n", - "config[\"EXPECTED_STD_PCT\"] = 5\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Get the Trace" - ] + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" }, + "name": "" + }, + "nbformat": 3, + "nbformat_minor": 0, + "worksheets": [ { - "cell_type": "code", - "execution_count": 69, - "metadata": { - "collapsed": false - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Fetching trace file..\n" + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Configuration" ] - } - ], - "source": [ - "import urllib\n", - "import os\n", - "\n", - "TRACE_DIR = \"example_trace_dat_thermal\"\n", - "TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')\n", - "TRACE_URL = 'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'\n", - "\n", - "if not os.path.isdir(TRACE_DIR):\n", - " os.mkdir(TRACE_DIR)\n", - "\n", - "if not os.path.isfile(TRACE_FILE):\n", - " print \"Fetching trace file..\"\n", - " urllib.urlretrieve(TRACE_URL, filename=TRACE_FILE)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": 
[ - "# Run Object" - ] - }, - { - "cell_type": "code", - "execution_count": 58, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Create a Trace object\n", - "\n", - "run = trappy.Run(TRACE_FILE, \"SomeBenchMark\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Assertions" - ] - }, - { - "cell_type": "code", - "execution_count": 59, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Create an Assertion Object\n", - "\n", - "from bart.common.Analyzer import Analyzer\n", - "t = Analyzer(run, config)\n", - "\n", - "BIG = '000000f0'\n", - "LITTLE = '0000000f'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Assertion: Load and Dynamic Power" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "This assertion makes sure that the dynamic power for the each cluster is zero when the sum of the \"loads\" of each CPU is 0\n", - "\n", - " $$\\forall\\ t\\ |\\ Load(t) = \\sum\\limits_{i=0}^{cpus} Load_i(t) = 0 \\implies dynamic\\ power(t)=0 $$\n", - " \n", - "" - ] - }, - { - "cell_type": "code", - "execution_count": 60, - "metadata": { - "collapsed": false - }, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\n", - "PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\n" + "cell_type": "code", + "collapsed": false, + "input": [ + "import trappy \n", + "\n", + "config = {}\n", + "\n", + "# TRAPpy Events\n", + "config[\"THERMAL\"] = trappy.thermal.Thermal\n", + "config[\"OUT\"] = trappy.cpu_power.CpuOutPower\n", + "config[\"IN\"] = trappy.cpu_power.CpuInPower\n", + "config[\"PID\"] = trappy.pid_controller.PIDController\n", + "config[\"GOVERNOR\"] = trappy.thermal.ThermalGovernor\n", + "\n", + "# Control Temperature\n", + "config[\"CONTROL_TEMP\"] = 77000\n", + "\n", + "# A temperature margin of 2.5 degrees Celsius\n", + 
"config[\"TEMP_MARGIN\"] = 2500\n", + "\n", + "# The Sustainable power at the control Temperature\n", + "config[\"SUSTAINABLE_POWER\"] = 2500\n", + "\n", + "# Expected percentile of CONTROL_TEMP + TEMP_MARGIN\n", + "config[\"EXPECTED_TEMP_QRT\"] = 95\n", + "\n", + "# Maximum expected Standard Deviation as a percentage\n", + "# of mean temperature\n", + "config[\"EXPECTED_STD_PCT\"] = 5\n" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 1 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Get the Trace" ] - } - ], - "source": [ - "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", - " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", - "if len(result):\n", - " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster\"\n", - "else:\n", - " print \"PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\"\n", - "\n", - " \n", - "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", - " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", - "if len(result):\n", - " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster\"\n", - "else:\n", - " print \"PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Assertion: Control Temperature and Sustainable Power" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "When the temperature is greater than the control temperature, the total power granted to all cooling devices should be less than sustainable_power\n", - "\n", - "$$\\forall\\ t\\ |\\ Temperature(t) > control\\_temp \\implies Total\\ Granted\\ Power(t) < sustainable\\_power$$\n", - "\n", - "" - ] - }, - { - "cell_type": "code", - "execution_count": 61, - "metadata": { - "collapsed": false - }, - "outputs": [ + }, + { + "cell_type": 
"code", + "collapsed": false, + "input": [ + "import urllib\n", + "import os\n", + "\n", + "TRACE_DIR = \"example_trace_dat_thermal\"\n", + "TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')\n", + "TRACE_URL = 'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'\n", + "\n", + "if not os.path.isdir(TRACE_DIR):\n", + " os.mkdir(TRACE_DIR)\n", + "\n", + "if not os.path.isfile(TRACE_FILE):\n", + " print \"Fetching trace file..\"\n", + " urllib.urlretrieve(TRACE_URL, filename=TRACE_FILE)" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "Fetching trace file..\n" + ] + } + ], + "prompt_number": 69 + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\n" + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Run Object" ] - } - ], - "source": [ - "result = t.getStatement(\"(GOVERNOR:current_temperature > CONTROL_TEMP) &\\\n", - " (PID:output > SUSTAINABLE_POWER)\", reference=True, select=0)\n", - "\n", - "if len(result):\n", - " print \"FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP\"\n", - "else:\n", - " print \"PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\" " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Statistics" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Check if 95% of the temperature readings are below CONTROL_TEMP + MARGIN" - ] - }, - { - "cell_type": "code", - "execution_count": 62, - "metadata": { - "collapsed": false - }, - "outputs": [ + }, { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 62, + "cell_type": "code", + "collapsed": false, + "input": [ + "# Create a Trace object\n", + "\n", + "run = trappy.Run(TRACE_FILE, \"SomeBenchMark\")" + ], + 
"language": "python", "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "t.assertStatement(\"numpy.percentile(THERMAL:temp, 95) < (CONTROL_TEMP + TEMP_MARGIN)\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Check if the mean temperauture is less than CONTROL_TEMP" - ] - }, - { - "cell_type": "code", - "execution_count": 63, - "metadata": { - "collapsed": false - }, - "outputs": [ + "outputs": [], + "prompt_number": 58 + }, { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 63, + "cell_type": "heading", + "level": 1, "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "t.assertStatement(\"numpy.mean(THERMAL:temp) <= CONTROL_TEMP\", select=0)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also use getStatement to get the absolute values. Here we are getting the standard deviation expresssed as a percentage of the mean" - ] - }, - { - "cell_type": "code", - "execution_count": 64, - "metadata": { - "collapsed": false - }, - "outputs": [ + "source": [ + "Assertions" + ] + }, { - "data": { - "text/plain": [ - "2.2390646863105119" - ] - }, - "execution_count": 64, + "cell_type": "code", + "collapsed": false, + "input": [ + "# Create an Assertion Object\n", + "\n", + "from bart.common.Analyzer import Analyzer\n", + "t = Analyzer(run, config)\n", + "\n", + "BIG = '000000f0'\n", + "LITTLE = '0000000f'" + ], + "language": "python", "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "t.getStatement(\"(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\", select=0)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Thermal Residency" - ] - }, - { - "cell_type": "code", - "execution_count": 65, - "metadata": { - "collapsed": false - }, - "outputs": [ + "outputs": [], + "prompt_number": 59 + }, + { + "cell_type": "heading", + "level": 2, + "metadata": {}, + "source": [ + "Assertion: Load 
and Dynamic Power" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Thermal Zone: 0 spends 86.58% time in the temperature range [0, 78]\n", - "The 86.58th percentile temperature is 78.28\n" + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "This assertion makes sure that the dynamic power for the each cluster is zero when the sum of the \"loads\" of each CPU is 0\n", + "\n", + " $$\\forall\\ t\\ |\\ Load(t) = \\sum\\limits_{i=0}^{cpus} Load_i(t) = 0 \\implies dynamic\\ power(t)=0 $$\n", + " \n", + "" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", + " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", + "if len(result):\n", + " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster\"\n", + "else:\n", + " print \"PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\"\n", + "\n", + " \n", + "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", + " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", + "if len(result):\n", + " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster\"\n", + "else:\n", + " print \"PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\"" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\n", + "PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\n" + ] + } + ], + "prompt_number": 60 + }, + { + "cell_type": "heading", + "level": 2, + "metadata": {}, + "source": [ + "Assertion: Control Temperature and Sustainable Power" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "When the temperature is greater than the control temperature, the total power granted to all cooling 
devices should be less than sustainable_power\n", + "\n", + "$$\\forall\\ t\\ |\\ Temperature(t) > control\\_temp \\implies Total\\ Granted\\ Power(t) < sustainable\\_power$$\n", + "\n", + "" ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "result = t.getStatement(\"(GOVERNOR:current_temperature > CONTROL_TEMP) &\\\n", + " (PID:output > SUSTAINABLE_POWER)\", reference=True, select=0)\n", + "\n", + "if len(result):\n", + " print \"FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP\"\n", + "else:\n", + " print \"PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\" " + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\n" + ] + } + ], + "prompt_number": 61 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Statistics" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check if 95% of the temperature readings are below CONTROL_TEMP + MARGIN" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "t.assertStatement(\"numpy.percentile(THERMAL:temp, 95) < (CONTROL_TEMP + TEMP_MARGIN)\")" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "pyout", + "prompt_number": 62, + "text": [ + "True" + ] + } + ], + "prompt_number": 62 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check if the mean temperauture is less than CONTROL_TEMP" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "t.assertStatement(\"numpy.mean(THERMAL:temp) <= CONTROL_TEMP\", select=0)" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "pyout", + "prompt_number": 63, + "text": [ + "True" + ] + } + ], + "prompt_number": 63 + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "We can also use getStatement to get the absolute values. Here we are getting the standard deviation expresssed as a percentage of the mean" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "t.getStatement(\"(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\", select=0)" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "pyout", + "prompt_number": 64, + "text": [ + "2.2390646863105119" + ] + } + ], + "prompt_number": 64 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Thermal Residency" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "from bart.thermal.ThermalAssert import ThermalAssert\n", + "\n", + "t_assert = ThermalAssert(run)\n", + "end = run.get_duration()\n", + "\n", + "LOW = 0\n", + "HIGH = 78000\n", + "\n", + "# The thermal residency gives the percentage (or absolute time) spent in the\n", + "# specified temperature range. 
\n", + "\n", + "result = t_assert.getThermalResidency(temp_range=(0, 78000),\n", + " window=(0, end),\n", + " percent=True)\n", + "\n", + "for tz_id in result:\n", + " print \"Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]\".format(tz_id, \n", + " result[tz_id],\n", + " LOW/1000,\n", + " HIGH/1000)\n", + " pct_temp = numpy.percentile(t.getStatement(\"THERMAL:temp\")[tz_id], result[tz_id])\n", + " \n", + " print \"The {:.2f}th percentile temperature is {:.2f}\".format(result[tz_id], pct_temp / 1000.0)\n", + " " + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "Thermal Zone: 0 spends 86.58% time in the temperature range [0, 78]\n", + "The 86.58th percentile temperature is 78.28\n" + ] + } + ], + "prompt_number": 65 } ], - "source": [ - "from bart.thermal.ThermalAssert import ThermalAssert\n", - "\n", - "t_assert = ThermalAssert(run)\n", - "end = run.get_duration()\n", - "\n", - "LOW = 0\n", - "HIGH = 78000\n", - "\n", - "# The thermal residency gives the percentage (or absolute time) spent in the\n", - "# specified temperature range. 
\n", - "\n", - "result = t_assert.getThermalResidency(temp_range=(0, 78000),\n", - " window=(0, end),\n", - " percent=True)\n", - "\n", - "for tz_id in result:\n", - " print \"Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]\".format(tz_id, \n", - " result[tz_id],\n", - " LOW/1000,\n", - " HIGH/1000)\n", - " pct_temp = numpy.percentile(t.getStatement(\"THERMAL:temp\")[tz_id], result[tz_id])\n", - " \n", - " print \"The {:.2f}th percentile temperature is {:.2f}\".format(result[tz_id], pct_temp / 1000.0)\n", - " " - ] + "metadata": {} } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.6" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file -- cgit v1.2.3 From 8a4d301dc8a8c5d6313735979e6992dd1d3a6d7c Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 21 Sep 2015 21:04:30 +0100 Subject: notebooks: thermal: import numpy It's needed for the percentile calculation. 
--- notebooks/thermal/Thermal.ipynb | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/notebooks/thermal/Thermal.ipynb b/notebooks/thermal/Thermal.ipynb index e08ca17..7a4d7d2 100644 --- a/notebooks/thermal/Thermal.ipynb +++ b/notebooks/thermal/Thermal.ipynb @@ -17,7 +17,8 @@ "pygments_lexer": "ipython2", "version": "2.7.6" }, - "name": "" + "name": "", + "signature": "sha256:fbdc9bb5733461926996dbf0561127d4e14beb1a1b27d165c7f665fc8c32b9a7" }, "nbformat": 3, "nbformat_minor": 0, @@ -36,7 +37,8 @@ "cell_type": "code", "collapsed": false, "input": [ - "import trappy \n", + "import trappy\n", + "import numpy\n", "\n", "config = {}\n", "\n", @@ -96,16 +98,8 @@ ], "language": "python", "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "Fetching trace file..\n" - ] - } - ], - "prompt_number": 69 + "outputs": [], + "prompt_number": 2 }, { "cell_type": "heading", @@ -126,7 +120,7 @@ "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 58 + "prompt_number": 3 }, { "cell_type": "heading", @@ -151,7 +145,7 @@ "language": "python", "metadata": {}, "outputs": [], - "prompt_number": 59 + "prompt_number": 4 }, { "cell_type": "heading", @@ -204,7 +198,7 @@ ] } ], - "prompt_number": 60 + "prompt_number": 5 }, { "cell_type": "heading", @@ -250,7 +244,7 @@ ] } ], - "prompt_number": 61 + "prompt_number": 6 }, { "cell_type": "heading", @@ -279,13 +273,13 @@ { "metadata": {}, "output_type": "pyout", - "prompt_number": 62, + "prompt_number": 7, "text": [ "True" ] } ], - "prompt_number": 62 + "prompt_number": 7 }, { "cell_type": "markdown", @@ -306,13 +300,13 @@ { "metadata": {}, "output_type": "pyout", - "prompt_number": 63, + "prompt_number": 8, "text": [ "True" ] } ], - "prompt_number": 63 + "prompt_number": 8 }, { "cell_type": "markdown", @@ -333,13 +327,13 @@ { "metadata": {}, "output_type": "pyout", - "prompt_number": 64, + "prompt_number": 9, "text": [ 
"2.2390646863105119" ] } ], - "prompt_number": 64 + "prompt_number": 9 }, { "cell_type": "heading", @@ -390,7 +384,7 @@ ] } ], - "prompt_number": 65 + "prompt_number": 10 } ], "metadata": {} -- cgit v1.2.3 From 5db160262ad4e0d7cf6b688b0ce8fb12f053f6f0 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 21 Sep 2015 21:05:01 +0100 Subject: notebooks: thermal: fix copypasta When copy&pasting the getStatement for the LITTLE cluster, we forgot to update the "select". --- notebooks/thermal/Thermal.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notebooks/thermal/Thermal.ipynb b/notebooks/thermal/Thermal.ipynb index 7a4d7d2..4f67e12 100644 --- a/notebooks/thermal/Thermal.ipynb +++ b/notebooks/thermal/Thermal.ipynb @@ -180,7 +180,7 @@ "\n", " \n", "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", - " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", + " & (IN:dynamic_power > 0)\",reference=True, select=LITTLE)\n", "if len(result):\n", " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster\"\n", "else:\n", -- cgit v1.2.3 From 8a05baa53e34a9744e08f91fd026245da802155e Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 21 Sep 2015 21:06:26 +0100 Subject: notebooks: thermal: s/expresssed/expressed/ --- notebooks/thermal/Thermal.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/notebooks/thermal/Thermal.ipynb b/notebooks/thermal/Thermal.ipynb index 4f67e12..087d9b8 100644 --- a/notebooks/thermal/Thermal.ipynb +++ b/notebooks/thermal/Thermal.ipynb @@ -18,7 +18,7 @@ "version": "2.7.6" }, "name": "", - "signature": "sha256:fbdc9bb5733461926996dbf0561127d4e14beb1a1b27d165c7f665fc8c32b9a7" + "signature": "sha256:59ef0b9fe2847e77f9df55deeb6df1f94f4fe2a3a0f99e13cba99854e8bf66ed" }, "nbformat": 3, "nbformat_minor": 0, @@ -312,7 +312,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can also use getStatement to get the absolute values. 
Here we are getting the standard deviation expresssed as a percentage of the mean" + "We can also use getStatement to get the absolute values. Here we are getting the standard deviation expressed as a percentage of the mean" ] }, { -- cgit v1.2.3 From 43b0bd441f4b91357cb4895eb59a394eaf2feef0 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 23 Sep 2015 13:57:21 +0100 Subject: bart: Add __version__ --- bart/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bart/__init__.py b/bart/__init__.py index 7dbf066..5a662d5 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -18,3 +18,9 @@ import bart.sched import bart.common import bart.thermal +import pkg_resources + +try: + __version__ = pkg_resources.get_distribution("trappy").version +except pkg_resources.DistributionNotFound: + __version__ = "local" -- cgit v1.2.3 From 2f34b6c8882ffaf145e6fb3d9469d1d76b16e092 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 23 Sep 2015 16:51:49 +0100 Subject: bart: fix getting the version of the installed bart MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Copy&paste error 😇 --- bart/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/__init__.py b/bart/__init__.py index 5a662d5..079a06d 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -21,6 +21,6 @@ import bart.thermal import pkg_resources try: - __version__ = pkg_resources.get_distribution("trappy").version + __version__ = pkg_resources.get_distribution("bart").version except pkg_resources.DistributionNotFound: __version__ = "local" -- cgit v1.2.3 From e675e96e240d15074bec60c013e678072c9dba8f Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 21 Sep 2015 15:07:22 +0100 Subject: doc: BART Documentation project * Add API reference docs * Rearrange notebooks and examples Signed-off-by: Kapileshwar Singh --- docs/api_reference/.gitignore | 3 + docs/api_reference/Makefile | 196 ++++++++++ docs/api_reference/conf.py | 378 
++++++++++++++++++ docs/api_reference/index.rst | 22 ++ docs/examples/thermal.py | 87 +++++ docs/notebooks/sched/SchedDeadline.ipynb | 642 +++++++++++++++++++++++++++++++ docs/notebooks/thermal/Thermal.ipynb | 393 +++++++++++++++++++ examples/thermal.py | 87 ----- notebooks/sched/SchedDeadline.ipynb | 642 ------------------------------- notebooks/thermal/Thermal.ipynb | 393 ------------------- 10 files changed, 1721 insertions(+), 1122 deletions(-) create mode 100644 docs/api_reference/.gitignore create mode 100644 docs/api_reference/Makefile create mode 100644 docs/api_reference/conf.py create mode 100644 docs/api_reference/index.rst create mode 100644 docs/examples/thermal.py create mode 100644 docs/notebooks/sched/SchedDeadline.ipynb create mode 100644 docs/notebooks/thermal/Thermal.ipynb delete mode 100644 examples/thermal.py delete mode 100644 notebooks/sched/SchedDeadline.ipynb delete mode 100644 notebooks/thermal/Thermal.ipynb diff --git a/docs/api_reference/.gitignore b/docs/api_reference/.gitignore new file mode 100644 index 0000000..588039e --- /dev/null +++ b/docs/api_reference/.gitignore @@ -0,0 +1,3 @@ +_build +*.rst +!index.rst diff --git a/docs/api_reference/Makefile b/docs/api_reference/Makefile new file mode 100644 index 0000000..f7c61a3 --- /dev/null +++ b/docs/api_reference/Makefile @@ -0,0 +1,196 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " applehelp to make an Apple Help Book" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " coverage to run coverage check of the documentation (if enabled)" + +clean: + rm -rf $(BUILDDIR)/* + ls *.rst | grep -v index.rst | xargs rm -f + +reference: + sphinx-apidoc -f -e -o . 
../../bart + +html: reference + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: reference + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: reference + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: reference + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: reference + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: reference + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: reference + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/BART.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/BART.qhc" + +applehelp: reference + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + +devhelp: reference + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/BART" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/BART" + @echo "# devhelp" + +epub: reference + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: reference + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: reference + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: reference + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: reference + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: reference + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: reference + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: reference + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 
+ +gettext: reference + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: reference + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: reference + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: reference + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +coverage: reference + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +xml: reference + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: reference + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py new file mode 100644 index 0000000..3b9b1d3 --- /dev/null +++ b/docs/api_reference/conf.py @@ -0,0 +1,378 @@ +# -*- coding: utf-8 -*- +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +# BART documentation build configuration file, created by +# sphinx-quickstart on Fri Sep 4 11:30:35 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +this_dir = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(this_dir, '../..')) +import bart + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.ifconfig', + 'sphinx.ext.viewcode'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'BART' +copyright = u'2015, ARM Ltd.' 
+author = u'Kapileshwar Singh(KP), Javi Merino' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '1' +# The full version, including alpha/beta/rc tags. +release = '1.0.0' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'classic' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. 
+#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +#html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'BARTdoc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. 
+#'preamble': '', + +# Latex figure (float) alignment +#'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'BART.tex', u'BART Documentation', + u'Kapileshwar Singh(KP), Javi Merino', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'bart', u'BART Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'BART', u'BART Documentation', + author, 'BART', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. 
+#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + + +# -- Options for Epub output ---------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project +epub_author = author +epub_publisher = author +epub_copyright = copyright + +# The basename for the epub file. It defaults to the project name. +#epub_basename = project + +# The HTML theme for the epub output. Since the default themes are not optimized +# for small screen space, using the same theme for HTML and epub output is +# usually not wise. This defaults to 'epub', a theme designed to save visual +# space. +#epub_theme = 'epub' + +# The language of the text. It defaults to the language option +# or 'en' if the language is not set. +#epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +#epub_scheme = '' + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +#epub_identifier = '' + +# A unique identification for the text. +#epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +#epub_cover = () + +# A sequence of (type, uri, title) tuples for the guide element of content.opf. +#epub_guide = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_pre_files = [] + +# HTML files shat should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_post_files = [] + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# The depth of the table of contents in toc.ncx. +#epub_tocdepth = 3 + +# Allow duplicate toc entries. +#epub_tocdup = True + +# Choose between 'default' and 'includehidden'. +#epub_tocscope = 'default' + +# Fix unsupported image types using the Pillow. 
+#epub_fix_images = False + +# Scale large images. +#epub_max_image_width = 0 + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#epub_show_urls = 'inline' + +# If false, no index is generated. +#epub_use_index = True diff --git a/docs/api_reference/index.rst b/docs/api_reference/index.rst new file mode 100644 index 0000000..f21d055 --- /dev/null +++ b/docs/api_reference/index.rst @@ -0,0 +1,22 @@ +.. BART documentation master file, created by + sphinx-quickstart on Fri Sep 4 12:40:17 2015. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to BART's documentation! +================================== + +Contents: + +.. toctree:: + :maxdepth: 4 + + bart + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/examples/thermal.py b/docs/examples/thermal.py new file mode 100644 index 0000000..33c2afb --- /dev/null +++ b/docs/examples/thermal.py @@ -0,0 +1,87 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +""" +An example file for usage of Analyzer for thermal assertions +""" +from bart.common.Analyzer import Analyzer +from trappy.stats.Topology import Topology +import unittest +import trappy + + +class TestThermal(unittest.TestCase): + + @classmethod + def setUpClass(cls): + # We can run a workload invocation script here + # Which then copies the required traces for analysis to + # the host. + trace_file = "update_a_trace_path_here" + run = trappy.Run(trace_file, "test_run") + + # Define the parameters that you intend to use in the grammar + config = {} + config["THERMAL"] = trappy.thermal.Thermal + config["OUT"] = trappy.cpu_power.CpuOutPower + config["IN"] = trappy.cpu_power.CpuInPower + config["PID"] = trappy.pid_controller.PIDController + config["GOVERNOR"] = trappy.thermal.ThermalGovernor + config["CONTROL_TEMP"] = 77000 + config["SUSTAINABLE_POWER"] = 2500 + config["EXPECTED_TEMP_QRT"] = 95 + config["EXPECTED_STD_PCT"] = 5 + + # Define a Topology + cls.BIG = '000000f0' + cls.LITTLE = '0000000f' + cls.tz = 0 + cls.analyzer = Analyzer(run, config) + + def test_temperature_quartile(self): + """Assert Temperature quartile""" + + self.assertTrue(self.analyzer.assertStatement( + "numpy.percentile(THERMAL:temp, EXPECTED_TEMP_QRT) < (CONTROL_TEMP + 5000)")) + + def test_average_temperature(self): + """Assert Average temperature""" + + self.assertTrue(self.analyzer.assertStatement( + "numpy.mean(THERMAL:temp) < CONTROL_TEMP", select=self.tz)) + + def test_temp_stdev(self): + """Assert StdDev(temp) as % of mean""" + + self.assertTrue(self.analyzer.assertStatement( + "(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\ + < EXPECTED_STD_PCT", select=self.tz)) + + def test_zero_load_input_power(self): + """Test power demand when load is zero""" + + zero_load_power_big = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \ + & (IN:dynamic_power > 0)", reference=True, select=self.BIG) + self.assertEquals(len(zero_load_power_big), 
0) + + zero_load_power_little = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \ + & (IN:dynamic_power > 0)", reference=True, select=self.LITTLE) + self.assertEquals(len(zero_load_power_little), 0) + + def test_sustainable_power(self): + """temp > control_temp, allocated_power < sustainable_power""" + + self.analyzer.getStatement("(GOVERNOR:current_temperature > CONTROL_TEMP) &\ + (PID:output > SUSTAINABLE_POWER)", reference=True, select=0) diff --git a/docs/notebooks/sched/SchedDeadline.ipynb b/docs/notebooks/sched/SchedDeadline.ipynb new file mode 100644 index 0000000..0172bb2 --- /dev/null +++ b/docs/notebooks/sched/SchedDeadline.ipynb @@ -0,0 +1,642 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from trappy.stats.Topology import Topology\n", + "from bart.sched.SchedMultiAssert import SchedMultiAssert\n", + "from bart.sched.SchedAssert import SchedAssert\n", + "import trappy\n", + "import os\n", + "import operator\n", + "import json\n", + "\n", + "#Define a CPU Topology (for multi-cluster systems)\n", + "BIG = [1, 2]\n", + "LITTLE = [0, 3, 4, 5]\n", + "CLUSTERS = [BIG, LITTLE]\n", + "topology = Topology(clusters=CLUSTERS)\n", + "\n", + "BASE_PATH = \"/Users/kapileshwarsingh/AnalysisRawData/LPC/sched_deadline/\"\n", + "\n", + "THRESHOLD = 10.0\n", + "def between_threshold(a, b):\n", + " return abs(((a - b) * 100.0) / b) < THRESHOLD" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Periodic Yield" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The thread periodic_yeild is woken up at 30ms intervals where it calls sched_yield and relinquishes its time-slice.\n", + "The expectation is that the task will have a duty cycle < 1% and a period of 30ms.\n", + "\n", + "There are two threads, and the rank=1 
conveys that the condition is true for one of the threads with the name \"periodic_yeild\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PASS: Period\n", + "{\n", + " \"1844\": {\n", + " \"period\": 1.0085000000401578, \n", + " \"task_name\": \"periodic_yield\"\n", + " }, \n", + " \"1845\": {\n", + " \"period\": 29.822017857142669, \n", + " \"task_name\": \"periodic_yield\"\n", + " }\n", + "}\n", + "\n", + "PASS: DutyCycle\n", + "{\n", + " \"1844\": {\n", + " \"task_name\": \"periodic_yield\", \n", + " \"dutycycle\": 0.074749999998857675\n", + " }, \n", + " \"1845\": {\n", + " \"task_name\": \"periodic_yield\", \n", + " \"dutycycle\": 0.03862499999343072\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"yield\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "\n", + "# Assert Period\n", + "s = SchedMultiAssert(run, topology, execnames=\"periodic_yield\")\n", + "if s.assertPeriod(30, between_threshold, rank=1):\n", + " print \"PASS: Period\"\n", + " print json.dumps(s.getPeriod(), indent=3)\n", + "\n", + "print \"\"\n", + " \n", + "# Assert DutyCycle \n", + "if s.assertDutyCycle(1, operator.lt, window=(0,4), rank=2):\n", + " print \"PASS: DutyCycle\"\n", + " print json.dumps(s.getDutyCycle(window=(0,4)), indent=3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CPU Hog" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The reservation of a CPU hogging task is set to 10ms for every 100ms. The assertion ensures a duty cycle of 10%" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PASS: DutyCycle\n", + "{\n", + " \"1852\": {\n", + " \"task_name\": \"cpuhog\", \n", + " \"dutycycle\": 10.050119999991693\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"cpuhog\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "s = SchedMultiAssert(run, topology, execnames=\"cpuhog\")\n", + "s.plot().view()\n", + "\n", + "# Assert DutyCycle\n", + "if s.assertDutyCycle(10, between_threshold, window=(0, 5), rank=1):\n", + " print \"PASS: DutyCycle\"\n", + " print json.dumps(s.getDutyCycle(window=(0, 5)), indent=3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Changing Reservations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A CPU hogging task has reservations set in the increasing order starting from 10% followed by a 2s period of normal execution" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WINDOW -> [0.00, 2.00]\n", + "PASS: Expected=10 Actual=10.38 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [2.00, 4.00]\n", + "PASS: Expected=100 Actual=99.60 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [4.00, 6.00]\n", + "PASS: Expected=20 Actual=21.06 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [6.00, 8.00]\n", + "PASS: Expected=100 Actual=95.69 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [8.00, 10.00]\n", + "PASS: Expected=30 Actual=31.78 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [10.00, 12.00]\n", + "PASS: Expected=100 Actual=98.23 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [12.00, 14.00]\n", + "PASS: Expected=40 Actual=40.74 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [14.00, 16.00]\n", + "PASS: Expected=100 Actual=97.58 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [16.00, 18.00]\n", + "PASS: Expected=50 Actual=52.51 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [18.00, 20.00]\n", + "PASS: Expected=100 Actual=96.38 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [20.00, 22.00]\n", + "PASS: Expected=60 Actual=60.71 THRESHOLD=10.0\n", + "\n" + ] + } + ], + "source": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"cancel_dl_timer\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "s = SchedAssert(run, topology, execname=\"cpuhog\")\n", + "s.plot().view()\n", + "\n", + "NUM_PHASES = 10\n", + "PHASE_DURATION = 2\n", + "start = s.getStartTime()\n", + "DUTY_CYCLE_FACTOR = 10\n", + "\n", + "\n", + "for phase in range(NUM_PHASES + 1):\n", + " window = (start + (phase * PHASE_DURATION),\n", + " start + ((phase + 1) * PHASE_DURATION))\n", + " \n", + " if phase % 2 == 0:\n", + " DUTY_CYCLE = (phase + 2) * DUTY_CYCLE_FACTOR / 2\n", + " else:\n", + " DUTY_CYCLE = 100\n", + "\n", + "\n", + " print \"WINDOW -> [{:.2f}, {:.2f}]\".format(window[0],\n", + " window[1])\n", + " \n", + " \n", + " \n", + " if s.assertDutyCycle(DUTY_CYCLE, between_threshold, 
window=window):\n", + " print \"PASS: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", + " s.getDutyCycle(window=window),\n", + " THRESHOLD)\n", + " else:\n", + " print \"FAIL: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", + " s.getDutyCycle(window=window),\n", + " THRESHOLD)\n", + " \n", + " print \"\"" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/docs/notebooks/thermal/Thermal.ipynb b/docs/notebooks/thermal/Thermal.ipynb new file mode 100644 index 0000000..087d9b8 --- /dev/null +++ b/docs/notebooks/thermal/Thermal.ipynb @@ -0,0 +1,393 @@ +{ + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + }, + "name": "", + "signature": "sha256:59ef0b9fe2847e77f9df55deeb6df1f94f4fe2a3a0f99e13cba99854e8bf66ed" + }, + "nbformat": 3, + "nbformat_minor": 0, + "worksheets": [ + { + "cells": [ + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Configuration" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "import trappy\n", + "import numpy\n", + "\n", + "config = {}\n", + "\n", + "# TRAPpy Events\n", + "config[\"THERMAL\"] = trappy.thermal.Thermal\n", + "config[\"OUT\"] = trappy.cpu_power.CpuOutPower\n", + "config[\"IN\"] = trappy.cpu_power.CpuInPower\n", + "config[\"PID\"] = 
trappy.pid_controller.PIDController\n", + "config[\"GOVERNOR\"] = trappy.thermal.ThermalGovernor\n", + "\n", + "# Control Temperature\n", + "config[\"CONTROL_TEMP\"] = 77000\n", + "\n", + "# A temperature margin of 2.5 degrees Celsius\n", + "config[\"TEMP_MARGIN\"] = 2500\n", + "\n", + "# The Sustainable power at the control Temperature\n", + "config[\"SUSTAINABLE_POWER\"] = 2500\n", + "\n", + "# Expected percentile of CONTROL_TEMP + TEMP_MARGIN\n", + "config[\"EXPECTED_TEMP_QRT\"] = 95\n", + "\n", + "# Maximum expected Standard Deviation as a percentage\n", + "# of mean temperature\n", + "config[\"EXPECTED_STD_PCT\"] = 5\n" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 1 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Get the Trace" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "import urllib\n", + "import os\n", + "\n", + "TRACE_DIR = \"example_trace_dat_thermal\"\n", + "TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')\n", + "TRACE_URL = 'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'\n", + "\n", + "if not os.path.isdir(TRACE_DIR):\n", + " os.mkdir(TRACE_DIR)\n", + "\n", + "if not os.path.isfile(TRACE_FILE):\n", + " print \"Fetching trace file..\"\n", + " urllib.urlretrieve(TRACE_URL, filename=TRACE_FILE)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 2 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Run Object" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "# Create a Trace object\n", + "\n", + "run = trappy.Run(TRACE_FILE, \"SomeBenchMark\")" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 3 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Assertions" + ] + }, + { + "cell_type": "code", + "collapsed": false, + 
"input": [ + "# Create an Assertion Object\n", + "\n", + "from bart.common.Analyzer import Analyzer\n", + "t = Analyzer(run, config)\n", + "\n", + "BIG = '000000f0'\n", + "LITTLE = '0000000f'" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 4 + }, + { + "cell_type": "heading", + "level": 2, + "metadata": {}, + "source": [ + "Assertion: Load and Dynamic Power" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "This assertion makes sure that the dynamic power for the each cluster is zero when the sum of the \"loads\" of each CPU is 0\n", + "\n", + " $$\\forall\\ t\\ |\\ Load(t) = \\sum\\limits_{i=0}^{cpus} Load_i(t) = 0 \\implies dynamic\\ power(t)=0 $$\n", + " \n", + "" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", + " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", + "if len(result):\n", + " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster\"\n", + "else:\n", + " print \"PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\"\n", + "\n", + " \n", + "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", + " & (IN:dynamic_power > 0)\",reference=True, select=LITTLE)\n", + "if len(result):\n", + " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster\"\n", + "else:\n", + " print \"PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\"" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\n", + "PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\n" + ] + } + ], + "prompt_number": 5 + }, + { + "cell_type": "heading", + "level": 2, + "metadata": {}, + "source": [ + "Assertion: Control Temperature and Sustainable 
Power" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "When the temperature is greater than the control temperature, the total power granted to all cooling devices should be less than sustainable_power\n", + "\n", + "$$\\forall\\ t\\ |\\ Temperature(t) > control\\_temp \\implies Total\\ Granted\\ Power(t) < sustainable\\_power$$\n", + "\n", + "" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "result = t.getStatement(\"(GOVERNOR:current_temperature > CONTROL_TEMP) &\\\n", + " (PID:output > SUSTAINABLE_POWER)\", reference=True, select=0)\n", + "\n", + "if len(result):\n", + " print \"FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP\"\n", + "else:\n", + " print \"PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\" " + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\n" + ] + } + ], + "prompt_number": 6 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Statistics" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check if 95% of the temperature readings are below CONTROL_TEMP + MARGIN" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "t.assertStatement(\"numpy.percentile(THERMAL:temp, 95) < (CONTROL_TEMP + TEMP_MARGIN)\")" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "pyout", + "prompt_number": 7, + "text": [ + "True" + ] + } + ], + "prompt_number": 7 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check if the mean temperauture is less than CONTROL_TEMP" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "t.assertStatement(\"numpy.mean(THERMAL:temp) <= CONTROL_TEMP\", select=0)" + ], + "language": "python", + 
"metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "pyout", + "prompt_number": 8, + "text": [ + "True" + ] + } + ], + "prompt_number": 8 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also use getStatement to get the absolute values. Here we are getting the standard deviation expressed as a percentage of the mean" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "t.getStatement(\"(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\", select=0)" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "pyout", + "prompt_number": 9, + "text": [ + "2.2390646863105119" + ] + } + ], + "prompt_number": 9 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Thermal Residency" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "from bart.thermal.ThermalAssert import ThermalAssert\n", + "\n", + "t_assert = ThermalAssert(run)\n", + "end = run.get_duration()\n", + "\n", + "LOW = 0\n", + "HIGH = 78000\n", + "\n", + "# The thermal residency gives the percentage (or absolute time) spent in the\n", + "# specified temperature range. 
\n", + "\n", + "result = t_assert.getThermalResidency(temp_range=(0, 78000),\n", + " window=(0, end),\n", + " percent=True)\n", + "\n", + "for tz_id in result:\n", + " print \"Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]\".format(tz_id, \n", + " result[tz_id],\n", + " LOW/1000,\n", + " HIGH/1000)\n", + " pct_temp = numpy.percentile(t.getStatement(\"THERMAL:temp\")[tz_id], result[tz_id])\n", + " \n", + " print \"The {:.2f}th percentile temperature is {:.2f}\".format(result[tz_id], pct_temp / 1000.0)\n", + " " + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "Thermal Zone: 0 spends 86.58% time in the temperature range [0, 78]\n", + "The 86.58th percentile temperature is 78.28\n" + ] + } + ], + "prompt_number": 10 + } + ], + "metadata": {} + } + ] +} \ No newline at end of file diff --git a/examples/thermal.py b/examples/thermal.py deleted file mode 100644 index 33c2afb..0000000 --- a/examples/thermal.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2015-2015 ARM Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -""" -An example file for usage of Analyzer for thermal assertions -""" -from bart.common.Analyzer import Analyzer -from trappy.stats.Topology import Topology -import unittest -import trappy - - -class TestThermal(unittest.TestCase): - - @classmethod - def setUpClass(cls): - # We can run a workload invocation script here - # Which then copies the required traces for analysis to - # the host. - trace_file = "update_a_trace_path_here" - run = trappy.Run(trace_file, "test_run") - - # Define the parameters that you intend to use in the grammar - config = {} - config["THERMAL"] = trappy.thermal.Thermal - config["OUT"] = trappy.cpu_power.CpuOutPower - config["IN"] = trappy.cpu_power.CpuInPower - config["PID"] = trappy.pid_controller.PIDController - config["GOVERNOR"] = trappy.thermal.ThermalGovernor - config["CONTROL_TEMP"] = 77000 - config["SUSTAINABLE_POWER"] = 2500 - config["EXPECTED_TEMP_QRT"] = 95 - config["EXPECTED_STD_PCT"] = 5 - - # Define a Topology - cls.BIG = '000000f0' - cls.LITTLE = '0000000f' - cls.tz = 0 - cls.analyzer = Analyzer(run, config) - - def test_temperature_quartile(self): - """Assert Temperature quartile""" - - self.assertTrue(self.analyzer.assertStatement( - "numpy.percentile(THERMAL:temp, EXPECTED_TEMP_QRT) < (CONTROL_TEMP + 5000)")) - - def test_average_temperature(self): - """Assert Average temperature""" - - self.assertTrue(self.analyzer.assertStatement( - "numpy.mean(THERMAL:temp) < CONTROL_TEMP", select=self.tz)) - - def test_temp_stdev(self): - """Assert StdDev(temp) as % of mean""" - - self.assertTrue(self.analyzer.assertStatement( - "(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\ - < EXPECTED_STD_PCT", select=self.tz)) - - def test_zero_load_input_power(self): - """Test power demand when load is zero""" - - zero_load_power_big = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \ - & (IN:dynamic_power > 0)", reference=True, select=self.BIG) - self.assertEquals(len(zero_load_power_big), 
0) - - zero_load_power_little = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \ - & (IN:dynamic_power > 0)", reference=True, select=self.LITTLE) - self.assertEquals(len(zero_load_power_little), 0) - - def test_sustainable_power(self): - """temp > control_temp, allocated_power < sustainable_power""" - - self.analyzer.getStatement("(GOVERNOR:current_temperature > CONTROL_TEMP) &\ - (PID:output > SUSTAINABLE_POWER)", reference=True, select=0) diff --git a/notebooks/sched/SchedDeadline.ipynb b/notebooks/sched/SchedDeadline.ipynb deleted file mode 100644 index 0172bb2..0000000 --- a/notebooks/sched/SchedDeadline.ipynb +++ /dev/null @@ -1,642 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Setup" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from trappy.stats.Topology import Topology\n", - "from bart.sched.SchedMultiAssert import SchedMultiAssert\n", - "from bart.sched.SchedAssert import SchedAssert\n", - "import trappy\n", - "import os\n", - "import operator\n", - "import json\n", - "\n", - "#Define a CPU Topology (for multi-cluster systems)\n", - "BIG = [1, 2]\n", - "LITTLE = [0, 3, 4, 5]\n", - "CLUSTERS = [BIG, LITTLE]\n", - "topology = Topology(clusters=CLUSTERS)\n", - "\n", - "BASE_PATH = \"/Users/kapileshwarsingh/AnalysisRawData/LPC/sched_deadline/\"\n", - "\n", - "THRESHOLD = 10.0\n", - "def between_threshold(a, b):\n", - " return abs(((a - b) * 100.0) / b) < THRESHOLD" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Periodic Yield" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The thread periodic_yeild is woken up at 30ms intervals where it calls sched_yield and relinquishes its time-slice.\n", - "The expectation is that the task will have a duty cycle < 1% and a period of 30ms.\n", - "\n", - "There are two threads, and the rank=1 conveys 
that the condition is true for one of the threads with the name \"periodic_yeild\"\n" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "PASS: Period\n", - "{\n", - " \"1844\": {\n", - " \"period\": 1.0085000000401578, \n", - " \"task_name\": \"periodic_yield\"\n", - " }, \n", - " \"1845\": {\n", - " \"period\": 29.822017857142669, \n", - " \"task_name\": \"periodic_yield\"\n", - " }\n", - "}\n", - "\n", - "PASS: DutyCycle\n", - "{\n", - " \"1844\": {\n", - " \"task_name\": \"periodic_yield\", \n", - " \"dutycycle\": 0.074749999998857675\n", - " }, \n", - " \"1845\": {\n", - " \"task_name\": \"periodic_yield\", \n", - " \"dutycycle\": 0.03862499999343072\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "TRACE_FILE = os.path.join(BASE_PATH, \"yield\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", - "\n", - "# Assert Period\n", - "s = SchedMultiAssert(run, topology, execnames=\"periodic_yield\")\n", - "if s.assertPeriod(30, between_threshold, rank=1):\n", - " print \"PASS: Period\"\n", - " print json.dumps(s.getPeriod(), indent=3)\n", - "\n", - "print \"\"\n", - " \n", - "# Assert DutyCycle \n", - "if s.assertDutyCycle(1, operator.lt, window=(0,4), rank=2):\n", - " print \"PASS: DutyCycle\"\n", - " print json.dumps(s.getDutyCycle(window=(0,4)), indent=3)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# CPU Hog" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The reservation of a CPU hogging task is set to 10ms for every 100ms. The assertion ensures a duty cycle of 10%" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
\n", - " \n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "PASS: DutyCycle\n", - "{\n", - " \"1852\": {\n", - " \"task_name\": \"cpuhog\", \n", - " \"dutycycle\": 10.050119999991693\n", - " }\n", - "}\n" - ] - } - ], - "source": [ - "TRACE_FILE = os.path.join(BASE_PATH, \"cpuhog\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", - "s = SchedMultiAssert(run, topology, execnames=\"cpuhog\")\n", - "s.plot().view()\n", - "\n", - "# Assert DutyCycle\n", - "if s.assertDutyCycle(10, between_threshold, window=(0, 5), rank=1):\n", - " print \"PASS: DutyCycle\"\n", - " print json.dumps(s.getDutyCycle(window=(0, 5)), indent=3)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Changing Reservations" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A CPU hogging task has reservations set in the increasing order starting from 10% followed by a 2s period of normal execution" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
\n", - " \n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WINDOW -> [0.00, 2.00]\n", - "PASS: Expected=10 Actual=10.38 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [2.00, 4.00]\n", - "PASS: Expected=100 Actual=99.60 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [4.00, 6.00]\n", - "PASS: Expected=20 Actual=21.06 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [6.00, 8.00]\n", - "PASS: Expected=100 Actual=95.69 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [8.00, 10.00]\n", - "PASS: Expected=30 Actual=31.78 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [10.00, 12.00]\n", - "PASS: Expected=100 Actual=98.23 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [12.00, 14.00]\n", - "PASS: Expected=40 Actual=40.74 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [14.00, 16.00]\n", - "PASS: Expected=100 Actual=97.58 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [16.00, 18.00]\n", - "PASS: Expected=50 Actual=52.51 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [18.00, 20.00]\n", - "PASS: Expected=100 Actual=96.38 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [20.00, 22.00]\n", - "PASS: Expected=60 Actual=60.71 THRESHOLD=10.0\n", - "\n" - ] - } - ], - "source": [ - "TRACE_FILE = os.path.join(BASE_PATH, \"cancel_dl_timer\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", - "s = SchedAssert(run, topology, execname=\"cpuhog\")\n", - "s.plot().view()\n", - "\n", - "NUM_PHASES = 10\n", - "PHASE_DURATION = 2\n", - "start = s.getStartTime()\n", - "DUTY_CYCLE_FACTOR = 10\n", - "\n", - "\n", - "for phase in range(NUM_PHASES + 1):\n", - " window = (start + (phase * PHASE_DURATION),\n", - " start + ((phase + 1) * PHASE_DURATION))\n", - " \n", - " if phase % 2 == 0:\n", - " DUTY_CYCLE = (phase + 2) * DUTY_CYCLE_FACTOR / 2\n", - " else:\n", - " DUTY_CYCLE = 100\n", - "\n", - "\n", - " print \"WINDOW -> [{:.2f}, {:.2f}]\".format(window[0],\n", - " window[1])\n", - " \n", - " \n", - " \n", - " if s.assertDutyCycle(DUTY_CYCLE, between_threshold, 
window=window):\n", - " print \"PASS: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", - " s.getDutyCycle(window=window),\n", - " THRESHOLD)\n", - " else:\n", - " print \"FAIL: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", - " s.getDutyCycle(window=window),\n", - " THRESHOLD)\n", - " \n", - " print \"\"" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.9" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/notebooks/thermal/Thermal.ipynb b/notebooks/thermal/Thermal.ipynb deleted file mode 100644 index 087d9b8..0000000 --- a/notebooks/thermal/Thermal.ipynb +++ /dev/null @@ -1,393 +0,0 @@ -{ - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.6" - }, - "name": "", - "signature": "sha256:59ef0b9fe2847e77f9df55deeb6df1f94f4fe2a3a0f99e13cba99854e8bf66ed" - }, - "nbformat": 3, - "nbformat_minor": 0, - "worksheets": [ - { - "cells": [ - { - "cell_type": "heading", - "level": 1, - "metadata": {}, - "source": [ - "Configuration" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import trappy\n", - "import numpy\n", - "\n", - "config = {}\n", - "\n", - "# TRAPpy Events\n", - "config[\"THERMAL\"] = trappy.thermal.Thermal\n", - "config[\"OUT\"] = trappy.cpu_power.CpuOutPower\n", - "config[\"IN\"] = trappy.cpu_power.CpuInPower\n", - "config[\"PID\"] = trappy.pid_controller.PIDController\n", 
- "config[\"GOVERNOR\"] = trappy.thermal.ThermalGovernor\n", - "\n", - "# Control Temperature\n", - "config[\"CONTROL_TEMP\"] = 77000\n", - "\n", - "# A temperature margin of 2.5 degrees Celsius\n", - "config[\"TEMP_MARGIN\"] = 2500\n", - "\n", - "# The Sustainable power at the control Temperature\n", - "config[\"SUSTAINABLE_POWER\"] = 2500\n", - "\n", - "# Expected percentile of CONTROL_TEMP + TEMP_MARGIN\n", - "config[\"EXPECTED_TEMP_QRT\"] = 95\n", - "\n", - "# Maximum expected Standard Deviation as a percentage\n", - "# of mean temperature\n", - "config[\"EXPECTED_STD_PCT\"] = 5\n" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 1 - }, - { - "cell_type": "heading", - "level": 1, - "metadata": {}, - "source": [ - "Get the Trace" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import urllib\n", - "import os\n", - "\n", - "TRACE_DIR = \"example_trace_dat_thermal\"\n", - "TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')\n", - "TRACE_URL = 'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'\n", - "\n", - "if not os.path.isdir(TRACE_DIR):\n", - " os.mkdir(TRACE_DIR)\n", - "\n", - "if not os.path.isfile(TRACE_FILE):\n", - " print \"Fetching trace file..\"\n", - " urllib.urlretrieve(TRACE_URL, filename=TRACE_FILE)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 2 - }, - { - "cell_type": "heading", - "level": 1, - "metadata": {}, - "source": [ - "Run Object" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# Create a Trace object\n", - "\n", - "run = trappy.Run(TRACE_FILE, \"SomeBenchMark\")" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 3 - }, - { - "cell_type": "heading", - "level": 1, - "metadata": {}, - "source": [ - "Assertions" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# Create an Assertion 
Object\n", - "\n", - "from bart.common.Analyzer import Analyzer\n", - "t = Analyzer(run, config)\n", - "\n", - "BIG = '000000f0'\n", - "LITTLE = '0000000f'" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 4 - }, - { - "cell_type": "heading", - "level": 2, - "metadata": {}, - "source": [ - "Assertion: Load and Dynamic Power" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "This assertion makes sure that the dynamic power for the each cluster is zero when the sum of the \"loads\" of each CPU is 0\n", - "\n", - " $$\\forall\\ t\\ |\\ Load(t) = \\sum\\limits_{i=0}^{cpus} Load_i(t) = 0 \\implies dynamic\\ power(t)=0 $$\n", - " \n", - "" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", - " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n", - "if len(result):\n", - " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster\"\n", - "else:\n", - " print \"PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\"\n", - "\n", - " \n", - "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n", - " & (IN:dynamic_power > 0)\",reference=True, select=LITTLE)\n", - "if len(result):\n", - " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster\"\n", - "else:\n", - " print \"PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\"" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\n", - "PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\n" - ] - } - ], - "prompt_number": 5 - }, - { - "cell_type": "heading", - "level": 2, - "metadata": {}, - "source": [ - "Assertion: Control Temperature and Sustainable Power" - ] - }, - { - "cell_type": 
"markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "When the temperature is greater than the control temperature, the total power granted to all cooling devices should be less than sustainable_power\n", - "\n", - "$$\\forall\\ t\\ |\\ Temperature(t) > control\\_temp \\implies Total\\ Granted\\ Power(t) < sustainable\\_power$$\n", - "\n", - "" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "result = t.getStatement(\"(GOVERNOR:current_temperature > CONTROL_TEMP) &\\\n", - " (PID:output > SUSTAINABLE_POWER)\", reference=True, select=0)\n", - "\n", - "if len(result):\n", - " print \"FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP\"\n", - "else:\n", - " print \"PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\" " - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\n" - ] - } - ], - "prompt_number": 6 - }, - { - "cell_type": "heading", - "level": 1, - "metadata": {}, - "source": [ - "Statistics" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Check if 95% of the temperature readings are below CONTROL_TEMP + MARGIN" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "t.assertStatement(\"numpy.percentile(THERMAL:temp, 95) < (CONTROL_TEMP + TEMP_MARGIN)\")" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "metadata": {}, - "output_type": "pyout", - "prompt_number": 7, - "text": [ - "True" - ] - } - ], - "prompt_number": 7 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Check if the mean temperauture is less than CONTROL_TEMP" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "t.assertStatement(\"numpy.mean(THERMAL:temp) <= CONTROL_TEMP\", select=0)" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - 
"metadata": {}, - "output_type": "pyout", - "prompt_number": 8, - "text": [ - "True" - ] - } - ], - "prompt_number": 8 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also use getStatement to get the absolute values. Here we are getting the standard deviation expressed as a percentage of the mean" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "t.getStatement(\"(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\", select=0)" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "metadata": {}, - "output_type": "pyout", - "prompt_number": 9, - "text": [ - "2.2390646863105119" - ] - } - ], - "prompt_number": 9 - }, - { - "cell_type": "heading", - "level": 1, - "metadata": {}, - "source": [ - "Thermal Residency" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from bart.thermal.ThermalAssert import ThermalAssert\n", - "\n", - "t_assert = ThermalAssert(run)\n", - "end = run.get_duration()\n", - "\n", - "LOW = 0\n", - "HIGH = 78000\n", - "\n", - "# The thermal residency gives the percentage (or absolute time) spent in the\n", - "# specified temperature range. 
\n", - "\n", - "result = t_assert.getThermalResidency(temp_range=(0, 78000),\n", - " window=(0, end),\n", - " percent=True)\n", - "\n", - "for tz_id in result:\n", - " print \"Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]\".format(tz_id, \n", - " result[tz_id],\n", - " LOW/1000,\n", - " HIGH/1000)\n", - " pct_temp = numpy.percentile(t.getStatement(\"THERMAL:temp\")[tz_id], result[tz_id])\n", - " \n", - " print \"The {:.2f}th percentile temperature is {:.2f}\".format(result[tz_id], pct_temp / 1000.0)\n", - " " - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "stream": "stdout", - "text": [ - "Thermal Zone: 0 spends 86.58% time in the temperature range [0, 78]\n", - "The 86.58th percentile temperature is 78.28\n" - ] - } - ], - "prompt_number": 10 - } - ], - "metadata": {} - } - ] -} \ No newline at end of file -- cgit v1.2.3 From d2f136ce1ff7ebf384f3b679871455e78aea757a Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 21 Sep 2015 15:08:11 +0100 Subject: doc: SchedAssert: Use sphinx friendly docstrings Signed-off-by: Kapileshwar Singh --- bart/sched/SchedAssert.py | 403 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 283 insertions(+), 120 deletions(-) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index ea57a6d..b8ce55f 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -13,8 +13,11 @@ # limitations under the License. # -"""A library for asserting scheduler scenarios based on the -statistics aggregation framework""" +""" +:mod:`bart.sched.SchedAssert` provides ability of assert scheduler behaviour. +The analysis is based on TRAPpy's statistics framework and is potent enough +to aggregate statistics over processor hierarchies. +""" import trappy import itertools @@ -31,21 +34,39 @@ class SchedAssert(object): """The primary focus of this class is to assert and verify predefined scheduler scenarios. 
This does not compare parameters - across runs""" + across runs + + :param run: A single trappy.Run object + or a path that can be passed to trappy.Run + :type run: :mod:`trappy.run.Run` + + :param topology: A topology that describes the arrangement of + CPU's on a system. This is useful for multi-cluster systems + where data needs to be aggregated at different topological + levels + :type topology: :mod:`trappy.stats.Topology.Topology` + + :param execname: The execname of the task to be analysed + + .. note:: + + There should be only one PID that maps to the specified + execname. If there are multiple PIDs :mod:`bart.sched.SchedMultiAssert` + should be used + + :type execname: str + + :param pid: The process ID of the task to be analysed + :type pid: int + + .. note: + + One of pid or execname is mandatory. If only execname + is specified, The current implementation will fail if + there are more than one processes with the same execname + """ def __init__(self, run, topology, execname=None, pid=None): - """Args: - run (trappy.Run): A single trappy.Run object - or a path that can be passed to trappy.Run - topology(trappy.stats.Topology): The CPU topology - execname(str, optional): Optional execname of the task - under consideration. - PID(int): The PID of the task to be checked - - One of pid or execname is mandatory. 
If only execname - is specified, The current implementation will fail if - there are more than one processes with the same execname - """ run = Utils.init_run(run) @@ -92,12 +113,14 @@ class SchedAssert(object): def _aggregator(self, aggfunc): """ - Returns an aggregator corresponding to the + Return an aggregator corresponding to the aggfunc, the aggregators are memoized for performance - Args: - aggfunc (function(pandas.Series)): Function parameter that - accepts a pandas.Series object and returns a vector/scalar result + :param aggfunc: Function parameter that + accepts a :mod:`pandas.Series` object and + returns a vector/scalar + + :type: function(:mod:`pandas.Series`) """ if aggfunc not in self._aggs.keys(): @@ -109,28 +132,42 @@ class SchedAssert(object): def getResidency(self, level, node, window=None, percent=False): """ Residency of the task is the amount of time it spends executing - a particular node of a topological level. For example: + a particular group of a topological level. For example: + :: + + clusters=[] + big = [1,2] + little = [0,3,4,5] + + topology = Topology(clusters=clusters) + + level="cluster" + node = [1,2] + + This will return the residency of the task on the big cluster. If + percent is specified it will be normalized to the total runtime + of the task - clusters=[] - big = [1,2] - little = [0,3,4,5] + :param level: The topological level to which the group belongs + :type level (hashable): - topology = Topology(clusters=clusters) + :param node: The group of CPUs for which residency + needs to calculated + :type node: list - level="cluster" - node = [1,2] + :param window: A (start, end) tuple to limit the scope of the + residency calculation. + :type window: tuple - Will return the residency of the task on the big cluster. 
If - percent is specified it will be normalized to the total RUNTIME - of the TASK + :param percent: If true the result is normalized to the total runtime + of the task and returned as a percentage + :type percent: bool - Args: - level (hashable): The level to which the node belongs - node (list): The node for which residency needs to calculated - window (tuple): A (start, end) tuple to limit the scope of the - residency calculation. - percent: If true the result is normalized to the total runtime - of the task and returned as a percentage + .. math:: + + R = \\frac{T_{group} \\times 100}{T_{total}} + + .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertResidency` """ # Get the index of the node in the level @@ -157,24 +194,54 @@ class SchedAssert(object): window=None, percent=False): """ - Args: - level (hashable): The level to which the node belongs - node (list): The node for which residency needs to assert - expected_value (double): The expected value of the residency - operator (function): A binary operator function that returns - a boolean - window (tuple): A (start, end) tuple to limit the scope of the - residency calculation. - percent: If true the result is normalized to the total runtime - of the task and returned as a percentage + :param level: The topological level to which the group belongs + :type level (hashable): + + :param node: The group of CPUs for which residency + needs to calculated + :type node: list + + :param expected_value: The expected value of the residency + :type expected_value: double + + :param operator: A binary operator function that returns + a boolean. 
For example: + :: + + import operator + op = operator.ge + assertResidency(level, node, expected_value, op) + + Will do the following check: + :: + + getResidency(level, node) >= expected_value + + A custom function can also be passed: + :: + + THRESHOLD=5 + def between_threshold(a, expected): + return abs(a - expected) <= THRESHOLD + + :type operator: function + + :param window: A (start, end) tuple to limit the scope of the + residency calculation. + :type window: tuple + + :param percent: If true the result is normalized to the total runtime + of the task and returned as a percentage + :type percent: bool + + .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getResidency` """ node_value = self.getResidency(level, node, window, percent) return operator(node_value, expected_value) def getStartTime(self): """ - Returns the first time the task ran - (across all CPUs) + :return: The first time the task ran across all the CPUs """ agg = self._aggregator(sconf.first_time) @@ -183,8 +250,8 @@ class SchedAssert(object): def getEndTime(self): """ - Returns the last time the task ran - (across all CPUs) + :return: The first last time the task ran across + all the CPUs """ agg = self._aggregator(sconf.first_time) @@ -254,20 +321,25 @@ class SchedAssert(object): ignore_multiple=True): """ This function asserts that there is context switch from the - from_node to the to_node: - - Args: - level (hashable): The level to which the node belongs - from_node (list): The node from which the task switches out - to_node (list): The node to which the task switches - window (tuple): A (start, end) tuple window of time where the - switch needs to be asserted - ignore_multiple (bool): If true, the function will ignore multiple - switches in the window, If false the assert will be true if and - only if there is a single switch within the specified window - - The function will only return true if and only if there is one - context switch between the specified nodes + :code:`from_node` 
to the :code:`to_node`: + + :param level: The topological level to which the group belongs + :type level (hashable): + + :param from_node: The node from which the task switches out + :type from_node: list + + :param to_node: The node to which the task switches + :type to_node: list + + :param window: A (start, end) tuple to limit the scope of the + residency calculation. + :type window: tuple + + :param ignore_multiple: If true, the function will ignore multiple + switches in the window, If false the assert will be true if and + only if there is a single switch within the specified window + :type ignore_multiple: bool """ from_node_index = self._topology.get_index(level, from_node) @@ -294,14 +366,18 @@ class SchedAssert(object): return False def getRuntime(self, window=None, percent=False): - """Returns the Total Runtime of a task - - Args: - window (tuple): A (start, end) tuple to limit - the scope of the calculation - percent (boolean): If True, the result is returned - as a percentage of the total execution time - of the run. + """Return the Total Runtime of a task + + :param window: A (start, end) tuple to limit the scope of the + residency calculation. + :type window: tuple + + :param percent: If True, the result is returned + as a percentage of the total execution time + of the run. + :type percent: bool + + .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertRuntime` """ agg = self._aggregator(sconf.residency_sum) @@ -328,27 +404,59 @@ class SchedAssert(object): percent=False): """Assert on the total runtime of the task - Args: - expected_value (double): The expected value of the total runtime - operator (func(a, b)): A binary operator function that - returns a boolean - window (tuple): A (start, end) tuple to limit the - scope of the calculation - percent (boolean): If True, the result is returned - as a percentage of the total execution time of the run. 
+ :param expected_value: The expected value of the runtime + :type expected_value: double + + :param operator: A binary operator function that returns + a boolean. For example: + :: + + import operator + op = operator.ge + assertRuntime(expected_value, op) + + Will do the following check: + :: + + getRuntime() >= expected_value + + A custom function can also be passed: + :: + + THRESHOLD=5 + def between_threshold(a, expected): + return abs(a - expected) <= THRESHOLD + + :type operator: function + + :param window: A (start, end) tuple to limit the scope of the + residency calculation. + :type window: tuple + + :param percent: If True, the result is returned + as a percentage of the total execution time + of the run. + :type percent: bool + + .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getRuntime` """ run_time = self.getRuntime(window, percent) return operator(run_time, expected_value) def getPeriod(self, window=None, align="start"): - """Returns average period of the task in (ms) + """Return average period of the task in (ms) + + :param window: A (start, end) tuple to limit the scope of the + residency calculation. + :type window: tuple + + :param align: + :code:`"start"` aligns period calculation to switch-in events + :code:`"end"` aligns the calculation to switch-out events + :type param: str - Args: - window (tuple): A (start, end) tuple to limit the - scope of the calculation - align: "start" aligns period calculation to switch-in events - "end" aligns the calculation to switch-out events + .. 
seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertPeriod` """ agg = self._aggregator(sconf.period) @@ -364,45 +472,92 @@ class SchedAssert(object): align="start"): """Assert on the period of the task - Args: - expected_value (double): The expected value of the total runtime - operator (func(a, b)): A binary operator function that - returns a boolean - window (tuple): A (start, end) tuple to limit the - scope of the calculation - percent (boolean): If True, the result is returned - as a percentage of the total execution time of the run. + :param expected_value: The expected value of the runtime + :type expected_value: double + + :param operator: A binary operator function that returns + a boolean. For example: + :: + + import operator + op = operator.ge + assertPeriod(expected_value, op) + + Will do the following check: + :: + + getPeriod() >= expected_value + + A custom function can also be passed: + :: + + THRESHOLD=5 + def between_threshold(a, expected): + return abs(a - expected) <= THRESHOLD + + :param window: A (start, end) tuple to limit the scope of the + calculation. + :type window: tuple + + :param align: + :code:`"start"` aligns period calculation to switch-in events + :code:`"end"` aligns the calculation to switch-out events + :type param: str + + .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getPeriod` """ period = self.getPeriod(window, align) return operator(period, expected_value) def getDutyCycle(self, window): - """Returns the duty cycle of the task - Args: - window (tuple): A (start, end) tuple to limit the - scope of the calculation + """Return the duty cycle of the task + + :param window: A (start, end) tuple to limit the scope of the + calculation. + :type window: tuple Duty Cycle: The percentage of time the task spends executing - in the given window + in the given window of time + + .. math:: + + \delta_{cycle} = \\frac{T_{exec} \\times 100}{T_{window}} + + .. 
seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertDutyCycle` """ return self.getRuntime(window, percent=True) def assertDutyCycle(self, expected_value, operator, window): """ - Args: - expected_value (double): The expected value of - the duty cycle - operator (func(a, b)): A binary operator function that - returns a boolean - window (tuple): A (start, end) tuple to limit the - scope of the calculation + :param operator: A binary operator function that returns + a boolean. For example: + :: + + import operator + op = operator.ge + assertPeriod(expected_value, op) + + Will do the following check: + :: + + getPeriod() >= expected_value + + A custom function can also be passed: + :: + + THRESHOLD=5 + def between_threshold(a, expected): + return abs(a - expected) <= THRESHOLD + + :param window: A (start, end) tuple to limit the scope of the + calculation. + :type window: tuple + + .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getDutyCycle` - Duty Cycle: - The percentage of time the task spends executing - in the given window """ return self.assertRuntime( expected_value, @@ -412,9 +567,9 @@ class SchedAssert(object): def getFirstCpu(self, window=None): """ - Args: - window (tuple): A (start, end) tuple to limit the - scope of the calculation + :return: The first CPU the task ran on + + .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertFirstCPU` """ agg = self._aggregator(sconf.first_cpu) @@ -429,17 +584,26 @@ class SchedAssert(object): def assertFirstCpu(self, cpus, window=None): """ - Args: - cpus (int, list): A list of acceptable CPUs - window (tuple): A (start, end) tuple to limit the scope - of the calculation + Check if the Task started (first ran on in the duration + of the trace) on a particular CPU(s) + + :param cpus: A list of acceptable CPUs + :type cpus: int, list + + .. 
seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getFirstCPU` """ + first_cpu = self.getFirstCpu(window=window) cpus = listify(cpus) return first_cpu in cpus def generate_events(self, level, start_id=0, window=None): - """Generate events for the trace plot""" + """Generate events for the trace plot + + .. note:: + This is an internal function accessed by the + :mod:`bart.sched.SchedMultiAssert` class for plotting data + """ agg = self._aggregator(sconf.trace_event) result = agg.aggregate(level=level, window=window) @@ -454,9 +618,8 @@ class SchedAssert(object): def plot(self, level="cpu", window=None, xlim=None): """ - Returns: - trappy.plotter.AbstractDataPlotter - Call .view() to draw the graph + :return: :mod:`trappy.plotter.AbstractDataPlotter` instance + Call :func:`view` to draw the graph """ if not xlim: -- cgit v1.2.3 From 0e116090dd51444443467bf54a2f04b86038a51d Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 21 Sep 2015 16:20:09 +0100 Subject: doc: SchedMultiAssert: Use sphinx friendly docstrings Signed-off-by: Kapileshwar Singh --- bart/sched/SchedMultiAssert.py | 131 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 117 insertions(+), 14 deletions(-) diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index 95f625a..65fad7a 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -25,18 +25,118 @@ from bart.sched.SchedAssert import SchedAssert from bart.common import Utils class SchedMultiAssert(object): - - """The primary focus of this class is to assert and verify - predefined scheduler scenarios. This does not compare parameters - across runs""" + """This is vector assertion class built on top of + :mod:`bart.sched.SchedAssert.SchedAssert` + + :param run: A single trappy.Run object + or a path that can be passed to trappy.Run + :type run: :mod:`trappy.run.Run` + + :param topology: A topology that describes the arrangement of + CPU's on a system. 
This is useful for multi-cluster systems + where data needs to be aggregated at different topological + levels + :type topology: :mod:`trappy.stats.Topology.Topology` + + :param execnames: The execnames of the task to be analysed + + A single execname or a list of execnames can be passed. + There can be multiple processes associated with a single + execname parameter. The execnames are searched using a prefix + match. + :type execname: list, str + + :param pid: The process ID of the task to be analysed + :type pid: list, int + + Consider the following processes which need to be analysed + + ===== ============== + PID execname + ===== ============== + 11 task_1 + 22 task_2 + 33 task_3 + ===== ============== + + A :mod:`bart.sched.SchedMultiAssert.SchedMultiAssert` instance be created + following different ways: + + - Using execname prefix match + :: + + SchedMultiAssert(run, topology, execnames="task_") + + - Individual Task names + :: + + SchedMultiAssert(run, topology, execnames=["task_1", "task_2", "task_3"]) + + - Using Process IDs + :: + + SchedMultiAssert(run, topology, pids=[11, 22, 33]) + + + All the functionality provided in :mod:`bart.sched.SchedAssert.SchedAssert` is available + in this class with the addition of handling vector assertions. 
+ + For example consider the use of :func:`getDutyCycle` + :: + + >>> s = SchedMultiAssert(run, topology, execnames="task_") + >>> s.getDutyCycle(window=(start, end)) + { + "11": { + "task_name": "task_1", + "dutycycle": 10.0 + }, + "22": { + "task_name": "task_2", + "dutycycle": 20.0 + }, + "33": { + "task_name": "task_3", + "dutycycle": 30.0 + }, + } + + The assertions can be used in a similar way + :: + + >>> import operator as op + >>> s = SchedMultiAssert(run, topology, execnames="task_") + >>> s.assertDutyCycle(15, op.ge, window=(start, end)) + { + "11": { + "task_name": "task_1", + "dutycycle": False + }, + "22": { + "task_name": "task_2", + "dutycycle": True + }, + "33": { + "task_name": "task_3", + "dutycycle": True + }, + } + + The above result can be coalesced using a :code:`rank` parameter + As we know that only 2 processes have duty cycles greater than 15% + we can do the following: + :: + + >>> import operator as op + >>> s = SchedMultiAssert(run, topology, execnames="task_") + >>> s.assertDutyCycle(15, op.ge, window=(start, end), rank=2) + True + + See :mod:`bart.sched.SchedAssert.SchedAssert` for the available + functionality + """ def __init__(self, run, topology, execnames): - """Args: - run (trappy.Run): A single trappy.Run object - or a path that can be passed to trappy.Run - topology(trappy.stats.Topology): The CPU topology - execname(str, list): List of execnames or single task - """ self._execnames = listify(execnames) self._run = Utils.init_run(run) @@ -120,7 +220,11 @@ class SchedMultiAssert(object): return result def generate_events(self, level, window=None): - """Generate Events for the trace plot""" + """Generate Events for the trace plot + + .. note:: + This is an internal function for plotting data + """ events = {} for s_assert in self._asserts.values(): @@ -130,9 +234,8 @@ class SchedMultiAssert(object): def plot(self, level="cpu", window=None, xlim=None): """ - Returns: - trappy.plotter.AbstractDataPlotter. 
Call .view() for - displaying the plot + :return: :mod:`trappy.plotter.AbstractDataPlotter` instance + Call :func:`view` to draw the graph """ if not xlim: -- cgit v1.2.3 From 81361c2a1370d30b82d0598fb97a0d96b9366043 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 21 Sep 2015 16:55:23 +0100 Subject: doc: ThermalAssert: Use sphinx friendly docstrings Signed-off-by: Kapileshwar Singh --- bart/thermal/ThermalAssert.py | 83 ++++++++++++++++++++++++++++++++----------- 1 file changed, 62 insertions(+), 21 deletions(-) diff --git a/bart/thermal/ThermalAssert.py b/bart/thermal/ThermalAssert.py index 684405c..75b8511 100644 --- a/bart/thermal/ThermalAssert.py +++ b/bart/thermal/ThermalAssert.py @@ -26,7 +26,11 @@ import numpy as np class ThermalAssert(object): """A class that accepts a TRAPpy Run object and - provides assertions for thermal behaviours""" + provides assertions for thermal behaviours + + :param run: A path to the trace file or a TRAPpy Run object + :type run: str, :mod:`trappy.run.Run` + """ def __init__(self, run, config=None): @@ -34,15 +38,24 @@ class ThermalAssert(object): self._analyzer = Analyzer(self._run, config) def getThermalResidency(self, temp_range, window, percent=False): - """Returns the total time spent in a given temperature range - Args: - temp_range (tuple): A tuple of (low_temp, high_temp) - which the specifies the range of temperature that - one intends to calculate the residency for. - window (tuple): A (start, end) tuple to limit the scope of the - residency calculation. - percent: Returns the residency as a percentage of the total - duration of the trace + """Return the total time spent in a given temperature range + + :param temp_range: A tuple of (low_temp, high_temp) + which the specifies the range of temperature that + one intends to calculate the residency for. + :type temp_range: tuple + + :param window: A (start, end) tuple to limit the scope of the + residency calculation. 
+ :type window: tuple + + :param percent: Returns the residency as a percentage of the total + duration of the trace + :type percent: bool + + .. seealso: + + :mod:`bart.thermal.ThermalAssert.ThermalAssert.assertThermalResidency` """ # Get a pivoted thermal temperature data using the grammar @@ -77,17 +90,45 @@ class ThermalAssert(object): window, percent=False): """ - Args: - expected_value (double): The expected value of the residency - operator (function): A binary operator function that returns - a boolean - temp_range (tuple): A tuple of (low_temp, high_temp) - which the specifies the range of temperature that - one intends to calculate the residency for. - window (tuple): A (start, end) tuple to limit the scope of the - residency calculation. - percent: Returns the residency as a percentage of the total - duration of the trace + :param expected_value: The expected value of the residency + :type expected_value: double + + :param operator: A binary operator function that returns + a boolean. For example: + :: + + import operator + op = operator.ge + assertThermalResidency(temp_range, expected_value, op) + + Will do the following check: + :: + + getThermalResidency(temp_range) >= expected_value + + A custom function can also be passed: + :: + + THRESHOLD=5 + def between_threshold(a, expected): + return abs(a - expected) <= THRESHOLD + + :param temp_range: A tuple of (low_temp, high_temp) + which the specifies the range of temperature that + one intends to calculate the residency for. + :type temp_range: tuple + + :param window: A (start, end) tuple to limit the scope of the + residency calculation. + :type window: tuple + + :param percent: Returns the residency as a percentage of the total + duration of the trace + :type percent: bool + + .. 
seealso: + + :mod:`bart.thermal.ThermalAssert.ThermalAssert.assertThermalResidency` """ residency = self.getThermalResidency(temp_range, window, percent) -- cgit v1.2.3 From 5f386027426e575d3f51af1cb44042b599fb0d41 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 21 Sep 2015 16:56:20 +0100 Subject: doc: Utils: Add Sphinx friendly docstrings Signed-off-by: Kapileshwar Singh --- bart/common/Utils.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index ecb83d3..7a1a3b4 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -18,7 +18,12 @@ import trappy def init_run(trace): - """Initialize the Run Object""" + """Initialize the Run Object + + :param trace: Path for the trace file + or a trace object + :type trace: str, :mod:`trappy.run.Run` + """ if isinstance(trace, basestring): return trappy.Run(trace) @@ -29,8 +34,14 @@ def init_run(trace): raise ValueError("Invalid trace Object") def select_window(series, window): - """Library Function to select a portion of - pandas time series + """Helper Function to select a portion of + pandas time series + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple """ if not window: -- cgit v1.2.3 From e79171c2d42739d6d80958037137600cf8c7ea6b Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 21 Sep 2015 16:57:01 +0100 Subject: doc: Analyser: Add sphinx friendly docstrings Signed-off-by: Kapileshwar Singh --- bart/common/Analyzer.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py index 60f9617..80c71a4 100644 --- a/bart/common/Analyzer.py +++ b/bart/common/Analyzer.py @@ -29,17 +29,30 @@ import numpy as np class Analyzer(object): """ - Args: - data (trappy.Run): A trappy.Run instance - config (dict): A dictionary of variables, classes - 
and functions that can be used in the statements + :param data: TRAPpy Run Object + :type data: :mod:`trappy.run.Run` + + :param config: A dictionary of variables, classes + and functions that can be used in the statements + :type config: dict """ def __init__(self, data, config, topology=None): self._parser = Parser(data, config, topology) def assertStatement(self, statement, select=None): - """Solve the statement for a boolean result""" + """Solve the statement for a boolean result + + :param statement: A string representing a valid + :mod:`trappy.stats.grammar` statement + :type statement: str + + :param select: If the result represents a boolean + mask and the data was derived from a TRAPpy event + with a pivot value. The :code:`select` can be + used to select a particular pivot value + :type select: :mod:`pandas.DataFrame` column + """ result = self.getStatement(statement, select=select) -- cgit v1.2.3 From e83cd9ce703c57544aa915323e68be020b8b43ee Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 23 Sep 2015 14:16:58 +0100 Subject: doc: SchedMatrix: Use sphinx friendly docstrings Signed-off-by: Kapileshwar Singh --- bart/sched/SchedMatrix.py | 179 +++++++++++++++++++++++++++++++++------------- 1 file changed, 131 insertions(+), 48 deletions(-) diff --git a/bart/sched/SchedMatrix.py b/bart/sched/SchedMatrix.py index 51cb60f..3a91fae 100755 --- a/bart/sched/SchedMatrix.py +++ b/bart/sched/SchedMatrix.py @@ -26,41 +26,42 @@ SchedMatrix creates a Matrix of Scheduler Waveform Correlations A = Reference Execution B = Execution to be Evaluated - +---+ +---+ - | | | | -A1, B3 +---+ +--+ +--------------+ - +---+ +---+ - | | | | -A2, B4 +--------------+ +--+ +---+ - +---+ +---+ - | | | | -A3, B1 +---+ +--+ +--------------+ - +---+ +---+ - | | | | -A4, B2 +--------------+ +--+ +---+ - - -Correlation Matrix - - B1 B2 B3 B4 -A1 1 0 1 0 - -A2 0 1 0 1 - -A3 1 0 1 0 - -A4 0 1 0 1 - - -Thus a success criteria can be defined as - -A1 has two similar threads in the +.. 
code:: + + +---+ +---+ + | | | | + A1, B3 +---+ +--+ +--------------+ + +---+ +---+ + | | | | + A2, B4 +--------------+ +--+ +---+ + +---+ +---+ + | | | | + A3, B1 +---+ +--+ +--------------+ + +---+ +---+ + | | | | + A4, B2 +--------------+ +--+ +---+ + + +**Correlation Matrix** + + === ==== ==== ==== ==== + B1 B2 B3 B4 + === ==== ==== ==== ==== + A1 1 0 1 0 + A2 0 1 0 1 + A3 1 0 1 0 + A4 0 1 0 1 + === ==== ==== ==== ==== + + +Thus a success criteria can be defined as A1 having two similar threads in the evaluated execution +:: -assertSiblings(A1, 2, operator.eq) -assertSiblings(A2, 2, operator.eq) -assertSiblings(A3, 2, operator.eq) -assertSiblings(A4, 2, operator.eq) + assertSiblings(A1, 2, operator.eq) + assertSiblings(A2, 2, operator.eq) + assertSiblings(A3, 2, operator.eq) + assertSiblings(A4, 2, operator.eq) """ @@ -81,12 +82,67 @@ POSITIVE_TOLERANCE = 0.80 class SchedMatrix(object): - """Valid cases are: + """ + :param reference_trace: The trace file path/run object + to be used as a reference + :type reference_trace: str, :mod:`trappy.run.Run` + + :param trace: The trace file path/run object + to be verified + :type trace: str, :mod:`trappy.run.Run` + + :param topology: A topology that describes the arrangement of + CPU's on a system. This is useful for multi-cluster systems + where data needs to be aggregated at different topological + levels + :type topology: :mod:`trappy.stats.Topology.Topology` + + :param execnames: The execnames of the task to be analysed + + A single execname or a list of execnames can be passed. + There can be multiple processes associated with a single + execname parameter. The execnames are searched using a prefix + match. 
+ :type execname: list, str + + Consider the following processes which need to be analysed: + + * **Reference Trace** + + ===== ============== + PID execname + ===== ============== + 11 task_1 + 22 task_2 + 33 task_3 + ===== ============== + + * **Trace to be verified** + + ===== ============== + PID execname + ===== ============== + 77 task_1 + 88 task_2 + 99 task_3 + ===== ============== + + + A :mod:`bart.sched.SchedMatrix.SchedMatrix` instance be created + following different ways: + + - Using execname prefix match + :: + + SchedMatrix(r_trace, trace, topology, + execnames="task_") + + - Individual Task names + :: + + SchedMatrix(r_trace, trace, topology, + execnames=["task_1", "task_2", "task_3"]) - * Single execname, multiple PIDs - * PID List - * Multiple execname, one-to-one PID - association """ def __init__( @@ -187,8 +243,18 @@ class SchedMatrix(object): def getSiblings(self, pid, tolerance=POSITIVE_TOLERANCE): """Return the number of processes in the - reference trace that have a correlation - greater than tolerance + reference trace that have a correlation + greater than tolerance + + :param pid: The PID of the process in the reference + trace + :type pid: int + + :param tolerance: A correlation value > tolerance + will classify the resultant process as a sibling + :type tolerance: float + + .. 
seealso:: :mod:`bart.sched.SchedMatrix.SchedMatrix.assertSiblings` """ ref_pid_idx = self._reference_pids.index(pid) @@ -198,13 +264,30 @@ class SchedMatrix(object): def assertSiblings(self, pid, expected_value, operator, tolerance=POSITIVE_TOLERANCE): """Assert that the number of siblings in the reference - trace match the expected value and the operator - - Args: - pid: The PID in the reference trace - expected_value: the second argument to the operator - operator: a function of the type f(a, b) that returns - a boolean - """ + trace match the expected value and the operator + + :param pid: The PID of the process in the reference + trace + :type pid: int + + :param operator: A binary operator function that returns + a boolean. For example: + :: + + import operator + op = operator.eq + getSiblings(pid, expected_value, op) + + Will do the following check: + :: + + getSiblings(pid) == expected_value + + :param tolerance: A correlation value > tolerance + will classify the resultant process as a sibling + :type tolerance: float + + .. 
seealso:: :mod:`bart.sched.SchedMatrix.SchedMatrix.getSiblings` + """ num_siblings = self.getSiblings(pid, tolerance) return operator(num_siblings, expected_value) -- cgit v1.2.3 From 04d265f52c1b17b0e45594cee7d655ee11137380 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 5 Oct 2015 17:10:45 +0100 Subject: doc: ThermalAssert: Fix typos Signed-off-by: Kapileshwar Singh --- bart/thermal/ThermalAssert.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bart/thermal/ThermalAssert.py b/bart/thermal/ThermalAssert.py index 75b8511..1dd77a2 100644 --- a/bart/thermal/ThermalAssert.py +++ b/bart/thermal/ThermalAssert.py @@ -41,7 +41,7 @@ class ThermalAssert(object): """Return the total time spent in a given temperature range :param temp_range: A tuple of (low_temp, high_temp) - which the specifies the range of temperature that + which specifies the range of temperature that one intends to calculate the residency for. :type temp_range: tuple @@ -114,7 +114,7 @@ class ThermalAssert(object): return abs(a - expected) <= THRESHOLD :param temp_range: A tuple of (low_temp, high_temp) - which the specifies the range of temperature that + which specifies the range of temperature that one intends to calculate the residency for. :type temp_range: tuple -- cgit v1.2.3 From 0fa287ecda476020bf469ec9057c7c453cd52cf5 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 5 Oct 2015 18:43:43 +0100 Subject: bart 1.0.1 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9962e73..3ded12c 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ from setuptools import setup, find_packages -VERSION = "1.0.0" +VERSION = "1.0.1" LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general expectation of the state of the system while targeting a single or set of heuristics. 
-- cgit v1.2.3 From a2d51ed8ad771801bf53969954948fe3643ed255 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 5 Oct 2015 18:46:14 +0100 Subject: setup: rename to bart-py for pypi Bart is already taken by some software to model the light curves of exoplanet transits. --- bart/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bart/__init__.py b/bart/__init__.py index 079a06d..b5bada9 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -21,6 +21,6 @@ import bart.thermal import pkg_resources try: - __version__ = pkg_resources.get_distribution("bart").version + __version__ = pkg_resources.get_distribution("bart-py").version except pkg_resources.DistributionNotFound: __version__ = "local" diff --git a/setup.py b/setup.py index 3ded12c..97ea81f 100644 --- a/setup.py +++ b/setup.py @@ -33,7 +33,7 @@ REQUIRES = [ "TRAPpy==1.0.0", ] -setup(name='BART', +setup(name='bart-py', version=VERSION, license="Apache v2", author="ARM-BART", -- cgit v1.2.3 From f8ba14c9fa7f650654461a41a13b27eb16c2e969 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 5 Oct 2015 18:54:24 +0100 Subject: setup: don't require a specific version of trappy This will break as soon as we update trappy. Let's not require a specific version until we know there are incompatibilities. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 97ea81f..523eac6 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ to assert behaviours using the FTrace output from the kernel """ REQUIRES = [ - "TRAPpy==1.0.0", + "TRAPpy", ] setup(name='bart-py', -- cgit v1.2.3 From daa7e56dd0e107a3018563da0b6fb033f2e0bee5 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 5 Oct 2015 19:05:04 +0100 Subject: setup: configure upload_docs Tell setuptools where we build the documentation so that "python setup.py upload_docs" works. 
--- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 setup.cfg diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..c4b13e3 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[upload_sphinx] +upload-dir = docs/api_reference/_build/html -- cgit v1.2.3 From 5b1275f59fece6373ab4d92edae7dbffd1802972 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 22 Oct 2015 12:25:50 +0100 Subject: sched: Update usage of EventPlot API The EventPlot API has changed and accepts keyword args for num_lanes and lane_prefix. Signed-off-by: Kapileshwar Singh --- bart/sched/SchedAssert.py | 4 +++- bart/sched/SchedMultiAssert.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index b8ce55f..04a7535 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -633,4 +633,6 @@ class SchedAssert(object): names = [self.name] num_lanes = self._topology.level_span(level) lane_prefix = level.upper() + ": " - return trappy.EventPlot(events, names, lane_prefix, num_lanes, xlim) + return trappy.EventPlot(events, names, xlim, + lane_prefix=lane_prefix, + num_lanes=num_lanes) diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index 65fad7a..e949d2c 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -248,4 +248,6 @@ class SchedMultiAssert(object): names = [s.name for s in self._asserts.values()] num_lanes = self._topology.level_span(level) lane_prefix = level.upper() + ": " - return trappy.EventPlot(events, names, lane_prefix, num_lanes, xlim) + return trappy.EventPlot(events, names, xlim, + lane_prefix=lane_prefix, + num_lanes=num_lanes) -- cgit v1.2.3 From 6427c9e6ab7f4f588550c7a8befdb600367cc636 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 22 Oct 2015 14:20:32 +0100 Subject: setup: Depend on trappy >= 2.0 trappy has changed the API for EventPlot in f4eefc02239a ("plotter: EventPlot: Allow specification of 
lane names") which will be released in trappy 2.0. This is fixed in 5b1275f59fec ("sched: Update usage of EventPlot API") in bart. Make sure bart the next version of bart depends on the appropriate version of trappy. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 523eac6..fc7950d 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ to assert behaviours using the FTrace output from the kernel """ REQUIRES = [ - "TRAPpy", + "TRAPpy>=2.0", ] setup(name='bart-py', -- cgit v1.2.3 From f20180dce12b2c234660cbbc81f2eafb86e7ad5c Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Mon, 2 Nov 2015 11:34:00 +0000 Subject: sched: SchedAssert: Fix division by zero in getPeriod Fixes issue #21 Signed-off-by: Kapileshwar Singh --- bart/sched/SchedAssert.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index 04a7535..906eead 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -462,7 +462,10 @@ class SchedAssert(object): agg = self._aggregator(sconf.period) period = agg.aggregate(level="all", window=window)[0] total, length = map(sum, zip(*period)) - return (total * 1000) / length + if length == 0: + return float("NaN") + else: + return (total * 1000) / length def assertPeriod( self, -- cgit v1.2.3 From 2b2cc6dfef79caa242416d1037811235020d0570 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 4 Nov 2015 11:03:41 +0000 Subject: bart 1.1.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index fc7950d..0c6de39 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ from setuptools import setup, find_packages -VERSION = "1.0.1" +VERSION = "1.1.0" LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general expectation of the state of the system while targeting a single or set of heuristics. 
-- cgit v1.2.3 From 0e83faeeee8af3e5b55561a88e0f58ec5e4434da Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 5 Nov 2015 11:34:00 +0000 Subject: sched: Use Median for calculation of the period of a task Here is what I do for calculating the period of the task: Let's say a task started execution at the following times T_1, T_2, ...T_n Currently for align = "start" the period is Average of: (T_2 - T_1), (T_4 - T_3), ....(T_n - T_{n-1}) In this method we have an edge condition when a task does not run for a particular time or when it migrates between CPUs causing the average to end up significantly higher than the actual period of the task. However, as periodicity is statistical in nature and can vary during the lifetime of the task, we can have a good understanding of the periodicity by getting the median period of the deltas above. Signed-off-by: Kapileshwar Singh --- bart/sched/SchedAssert.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index 906eead..b3cadfc 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -445,7 +445,19 @@ class SchedAssert(object): return operator(run_time, expected_value) def getPeriod(self, window=None, align="start"): - """Return average period of the task in (ms) + """Return the period of the task in (ms) + + Let's say a task started execution at the following times: + + .. math:: + + T_1, T_2, ...T_n + + The period is defined as: + + .. math:: + + Median((T_2 - T_1), (T_4 - T_3), ....(T_n - T_{n-1})) :param window: A (start, end) tuple to limit the scope of the residency calculation. 
@@ -460,12 +472,12 @@ class SchedAssert(object): """ agg = self._aggregator(sconf.period) - period = agg.aggregate(level="all", window=window)[0] - total, length = map(sum, zip(*period)) - if length == 0: + deltas = agg.aggregate(level="all", window=window)[0] + + if not len(deltas): return float("NaN") else: - return (total * 1000) / length + return np.median(deltas) * 1000 def assertPeriod( self, -- cgit v1.2.3 From 6462c1eed6d58a36afc246d2bec73076fa1bffcc Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Tue, 10 Nov 2015 14:35:42 +0000 Subject: doc: convert SchedDeadline.ipynb to nbformat v3 We aim to maintain compatibility with Ubuntu 14.04 LTS which ships an ipython that can only open notebooks with format 3. --- docs/notebooks/sched/SchedDeadline.ipynb | 1236 +++++++++++++++--------------- 1 file changed, 621 insertions(+), 615 deletions(-) diff --git a/docs/notebooks/sched/SchedDeadline.ipynb b/docs/notebooks/sched/SchedDeadline.ipynb index 0172bb2..dac2890 100644 --- a/docs/notebooks/sched/SchedDeadline.ipynb +++ b/docs/notebooks/sched/SchedDeadline.ipynb @@ -1,642 +1,648 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Setup" - ] + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "collapsed": false + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 }, - "outputs": [], - "source": [ - "from trappy.stats.Topology import Topology\n", - "from bart.sched.SchedMultiAssert import SchedMultiAssert\n", - "from bart.sched.SchedAssert import SchedAssert\n", - "import trappy\n", - "import os\n", - "import operator\n", - "import json\n", - "\n", - "#Define a CPU Topology (for multi-cluster systems)\n", - "BIG = [1, 2]\n", - "LITTLE = [0, 3, 4, 5]\n", - "CLUSTERS = [BIG, LITTLE]\n", - "topology = Topology(clusters=CLUSTERS)\n", - "\n", - "BASE_PATH = 
\"/Users/kapileshwarsingh/AnalysisRawData/LPC/sched_deadline/\"\n", - "\n", - "THRESHOLD = 10.0\n", - "def between_threshold(a, b):\n", - " return abs(((a - b) * 100.0) / b) < THRESHOLD" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Periodic Yield" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The thread periodic_yeild is woken up at 30ms intervals where it calls sched_yield and relinquishes its time-slice.\n", - "The expectation is that the task will have a duty cycle < 1% and a period of 30ms.\n", - "\n", - "There are two threads, and the rank=1 conveys that the condition is true for one of the threads with the name \"periodic_yeild\"\n" - ] + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.9" }, + "name": "" + }, + "nbformat": 3, + "nbformat_minor": 0, + "worksheets": [ { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "collapsed": false - }, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "PASS: Period\n", - "{\n", - " \"1844\": {\n", - " \"period\": 1.0085000000401578, \n", - " \"task_name\": \"periodic_yield\"\n", - " }, \n", - " \"1845\": {\n", - " \"period\": 29.822017857142669, \n", - " \"task_name\": \"periodic_yield\"\n", - " }\n", - "}\n", - "\n", - "PASS: DutyCycle\n", - "{\n", - " \"1844\": {\n", - " \"task_name\": \"periodic_yield\", \n", - " \"dutycycle\": 0.074749999998857675\n", - " }, \n", - " \"1845\": {\n", - " \"task_name\": \"periodic_yield\", \n", - " \"dutycycle\": 0.03862499999343072\n", - " }\n", - "}\n" + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Setup" ] - } - ], - "source": [ - "TRACE_FILE = os.path.join(BASE_PATH, \"yield\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", - "\n", - "# Assert Period\n", - "s = SchedMultiAssert(run, topology, 
execnames=\"periodic_yield\")\n", - "if s.assertPeriod(30, between_threshold, rank=1):\n", - " print \"PASS: Period\"\n", - " print json.dumps(s.getPeriod(), indent=3)\n", - "\n", - "print \"\"\n", - " \n", - "# Assert DutyCycle \n", - "if s.assertDutyCycle(1, operator.lt, window=(0,4), rank=2):\n", - " print \"PASS: DutyCycle\"\n", - " print json.dumps(s.getDutyCycle(window=(0,4)), indent=3)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# CPU Hog" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The reservation of a CPU hogging task is set to 10ms for every 100ms. The assertion ensures a duty cycle of 10%" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "collapsed": false - }, - "outputs": [ + }, { - "data": { - "text/html": [ - "\n", - "
\n", - " \n", - "
" - ], - "text/plain": [ - "" - ] - }, + "cell_type": "code", + "collapsed": false, + "input": [ + "from trappy.stats.Topology import Topology\n", + "from bart.sched.SchedMultiAssert import SchedMultiAssert\n", + "from bart.sched.SchedAssert import SchedAssert\n", + "import trappy\n", + "import os\n", + "import operator\n", + "import json\n", + "\n", + "#Define a CPU Topology (for multi-cluster systems)\n", + "BIG = [1, 2]\n", + "LITTLE = [0, 3, 4, 5]\n", + "CLUSTERS = [BIG, LITTLE]\n", + "topology = Topology(clusters=CLUSTERS)\n", + "\n", + "BASE_PATH = \"/Users/kapileshwarsingh/AnalysisRawData/LPC/sched_deadline/\"\n", + "\n", + "THRESHOLD = 10.0\n", + "def between_threshold(a, b):\n", + " return abs(((a - b) * 100.0) / b) < THRESHOLD" + ], + "language": "python", "metadata": {}, - "output_type": "display_data" + "outputs": [], + "prompt_number": 3 }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "PASS: DutyCycle\n", - "{\n", - " \"1852\": {\n", - " \"task_name\": \"cpuhog\", \n", - " \"dutycycle\": 10.050119999991693\n", - " }\n", - "}\n" + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Periodic Yield" ] - } - ], - "source": [ - "TRACE_FILE = os.path.join(BASE_PATH, \"cpuhog\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", - "s = SchedMultiAssert(run, topology, execnames=\"cpuhog\")\n", - "s.plot().view()\n", - "\n", - "# Assert DutyCycle\n", - "if s.assertDutyCycle(10, between_threshold, window=(0, 5), rank=1):\n", - " print \"PASS: DutyCycle\"\n", - " print json.dumps(s.getDutyCycle(window=(0, 5)), indent=3)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Changing Reservations" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A CPU hogging task has reservations set in the increasing order starting from 10% followed by a 2s period of normal execution" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "collapsed": false - }, - 
"outputs": [ + }, { - "data": { - "text/html": [ - "\n", - "
\n", - " \n", - "
" - ], - "text/plain": [ - "" - ] - }, + "cell_type": "markdown", "metadata": {}, - "output_type": "display_data" + "source": [ + "The thread periodic_yeild is woken up at 30ms intervals where it calls sched_yield and relinquishes its time-slice.\n", + "The expectation is that the task will have a duty cycle < 1% and a period of 30ms.\n", + "\n", + "There are two threads, and the rank=1 conveys that the condition is true for one of the threads with the name \"periodic_yeild\"\n" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "WINDOW -> [0.00, 2.00]\n", - "PASS: Expected=10 Actual=10.38 THRESHOLD=10.0\n", + "cell_type": "code", + "collapsed": false, + "input": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"yield\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", "\n", - "WINDOW -> [2.00, 4.00]\n", - "PASS: Expected=100 Actual=99.60 THRESHOLD=10.0\n", + "# Assert Period\n", + "s = SchedMultiAssert(run, topology, execnames=\"periodic_yield\")\n", + "if s.assertPeriod(30, between_threshold, rank=1):\n", + " print \"PASS: Period\"\n", + " print json.dumps(s.getPeriod(), indent=3)\n", "\n", - "WINDOW -> [4.00, 6.00]\n", - "PASS: Expected=20 Actual=21.06 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [6.00, 8.00]\n", - "PASS: Expected=100 Actual=95.69 THRESHOLD=10.0\n", - "\n", - "WINDOW -> [8.00, 10.00]\n", - "PASS: Expected=30 Actual=31.78 THRESHOLD=10.0\n", + "print \"\"\n", + " \n", + "# Assert DutyCycle \n", + "if s.assertDutyCycle(1, operator.lt, window=(0,4), rank=2):\n", + " print \"PASS: DutyCycle\"\n", + " print json.dumps(s.getDutyCycle(window=(0,4)), indent=3)" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "PASS: Period\n", + "{\n", + " \"1844\": {\n", + " \"period\": 1.0085000000401578, \n", + " \"task_name\": \"periodic_yield\"\n", + " }, \n", + " \"1845\": {\n", + " \"period\": 29.822017857142669, \n", + " \"task_name\": \"periodic_yield\"\n", + " }\n", + 
"}\n", + "\n", + "PASS: DutyCycle\n", + "{\n", + " \"1844\": {\n", + " \"task_name\": \"periodic_yield\", \n", + " \"dutycycle\": 0.074749999998857675\n", + " }, \n", + " \"1845\": {\n", + " \"task_name\": \"periodic_yield\", \n", + " \"dutycycle\": 0.03862499999343072\n", + " }\n", + "}\n" + ] + } + ], + "prompt_number": 10 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "CPU Hog" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The reservation of a CPU hogging task is set to 10ms for every 100ms. The assertion ensures a duty cycle of 10%" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"cpuhog\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "s = SchedMultiAssert(run, topology, execnames=\"cpuhog\")\n", + "s.plot().view()\n", "\n", - "WINDOW -> [10.00, 12.00]\n", - "PASS: Expected=100 Actual=98.23 THRESHOLD=10.0\n", + "# Assert DutyCycle\n", + "if s.assertDutyCycle(10, between_threshold, window=(0, 5), rank=1):\n", + " print \"PASS: DutyCycle\"\n", + " print json.dumps(s.getDutyCycle(window=(0, 5)), indent=3)" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "html": [ + "\n", + "
\n", + " \n", + "
" + ], + "metadata": {}, + "output_type": "display_data", + "text": [ + "" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "PASS: DutyCycle\n", + "{\n", + " \"1852\": {\n", + " \"task_name\": \"cpuhog\", \n", + " \"dutycycle\": 10.050119999991693\n", + " }\n", + "}\n" + ] + } + ], + "prompt_number": 11 + }, + { + "cell_type": "heading", + "level": 1, + "metadata": {}, + "source": [ + "Changing Reservations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A CPU hogging task has reservations set in the increasing order starting from 10% followed by a 2s period of normal execution" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "TRACE_FILE = os.path.join(BASE_PATH, \"cancel_dl_timer\")\n", + "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "s = SchedAssert(run, topology, execname=\"cpuhog\")\n", + "s.plot().view()\n", "\n", - "WINDOW -> [12.00, 14.00]\n", - "PASS: Expected=40 Actual=40.74 THRESHOLD=10.0\n", + "NUM_PHASES = 10\n", + "PHASE_DURATION = 2\n", + "start = s.getStartTime()\n", + "DUTY_CYCLE_FACTOR = 10\n", "\n", - "WINDOW -> [14.00, 16.00]\n", - "PASS: Expected=100 Actual=97.58 THRESHOLD=10.0\n", "\n", - "WINDOW -> [16.00, 18.00]\n", - "PASS: Expected=50 Actual=52.51 THRESHOLD=10.0\n", + "for phase in range(NUM_PHASES + 1):\n", + " window = (start + (phase * PHASE_DURATION),\n", + " start + ((phase + 1) * PHASE_DURATION))\n", + " \n", + " if phase % 2 == 0:\n", + " DUTY_CYCLE = (phase + 2) * DUTY_CYCLE_FACTOR / 2\n", + " else:\n", + " DUTY_CYCLE = 100\n", "\n", - "WINDOW -> [18.00, 20.00]\n", - "PASS: Expected=100 Actual=96.38 THRESHOLD=10.0\n", "\n", - "WINDOW -> [20.00, 22.00]\n", - "PASS: Expected=60 Actual=60.71 THRESHOLD=10.0\n", - "\n" - ] + " print \"WINDOW -> [{:.2f}, {:.2f}]\".format(window[0],\n", + " window[1])\n", + " \n", + " \n", + " \n", + " if s.assertDutyCycle(DUTY_CYCLE, between_threshold, window=window):\n", + " print \"PASS: Expected={} Actual={:.2f} 
THRESHOLD={}\".format(DUTY_CYCLE,\n", + " s.getDutyCycle(window=window),\n", + " THRESHOLD)\n", + " else:\n", + " print \"FAIL: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", + " s.getDutyCycle(window=window),\n", + " THRESHOLD)\n", + " \n", + " print \"\"" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "html": [ + "\n", + "
\n", + " \n", + "
" + ], + "metadata": {}, + "output_type": "display_data", + "text": [ + "" + ] + }, + { + "output_type": "stream", + "stream": "stdout", + "text": [ + "WINDOW -> [0.00, 2.00]\n", + "PASS: Expected=10 Actual=10.38 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [2.00, 4.00]\n", + "PASS: Expected=100 Actual=99.60 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [4.00, 6.00]\n", + "PASS: Expected=20 Actual=21.06 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [6.00, 8.00]\n", + "PASS: Expected=100 Actual=95.69 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [8.00, 10.00]\n", + "PASS: Expected=30 Actual=31.78 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [10.00, 12.00]\n", + "PASS: Expected=100 Actual=98.23 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [12.00, 14.00]\n", + "PASS: Expected=40 Actual=40.74 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [14.00, 16.00]\n", + "PASS: Expected=100 Actual=97.58 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [16.00, 18.00]\n", + "PASS: Expected=50 Actual=52.51 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [18.00, 20.00]\n", + "PASS: Expected=100 Actual=96.38 THRESHOLD=10.0\n", + "\n", + "WINDOW -> [20.00, 22.00]\n", + "PASS: Expected=60 Actual=60.71 THRESHOLD=10.0\n", + "\n" + ] + } + ], + "prompt_number": 4 } ], - "source": [ - "TRACE_FILE = os.path.join(BASE_PATH, \"cancel_dl_timer\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", - "s = SchedAssert(run, topology, execname=\"cpuhog\")\n", - "s.plot().view()\n", - "\n", - "NUM_PHASES = 10\n", - "PHASE_DURATION = 2\n", - "start = s.getStartTime()\n", - "DUTY_CYCLE_FACTOR = 10\n", - "\n", - "\n", - "for phase in range(NUM_PHASES + 1):\n", - " window = (start + (phase * PHASE_DURATION),\n", - " start + ((phase + 1) * PHASE_DURATION))\n", - " \n", - " if phase % 2 == 0:\n", - " DUTY_CYCLE = (phase + 2) * DUTY_CYCLE_FACTOR / 2\n", - " else:\n", - " DUTY_CYCLE = 100\n", - "\n", - "\n", - " print \"WINDOW -> [{:.2f}, {:.2f}]\".format(window[0],\n", - " window[1])\n", - " \n", - " \n", - " \n", - " if s.assertDutyCycle(DUTY_CYCLE, 
between_threshold, window=window):\n", - " print \"PASS: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", - " s.getDutyCycle(window=window),\n", - " THRESHOLD)\n", - " else:\n", - " print \"FAIL: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n", - " s.getDutyCycle(window=window),\n", - " THRESHOLD)\n", - " \n", - " print \"\"" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.9" + "metadata": {} } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file -- cgit v1.2.3 From b03236f09aafd38f635650420eadffccbb8eb819 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Tue, 10 Nov 2015 14:40:30 +0000 Subject: setup: Depend on trappy >= 3.0 trappy has changed the API of stats.SchedConf in 0fce01a7dad8 ("stats: SchedConf: Fix for period calculation"), the return value of period() is changed. This is fixed in bart in 0e83faeeee8a ("sched: Use Median for calculation of the period of a task"). --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 0c6de39..f0eac2f 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ to assert behaviours using the FTrace output from the kernel """ REQUIRES = [ - "TRAPpy>=2.0", + "TRAPpy>=3.0", ] setup(name='bart-py', -- cgit v1.2.3 From c9211760aa45c463a384c8f90c206d091dfa7d9f Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Tue, 27 Oct 2015 14:40:33 +0800 Subject: sched: SchedMultiAssert: Add missing pids argument The class should accept either pids or execnames as specified in the API reference. This fixes issue #23. 
Signed-off-by: Kapileshwar Singh --- bart/sched/SchedMultiAssert.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index e949d2c..2792079 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -136,12 +136,21 @@ class SchedMultiAssert(object): functionality """ - def __init__(self, run, topology, execnames): + def __init__(self, run, topology, execnames=None, pids=None): - self._execnames = listify(execnames) self._run = Utils.init_run(run) - self._pids = self._populate_pids() self._topology = topology + + if execnames and pids: + raise ValueError('Either pids or execnames must be specified') + if execnames: + self._execnames = listify(execnames) + self._pids = self._populate_pids() + elif pids: + self._pids = pids + else: + raise ValueError('One of PIDs or execnames must be specified') + self._asserts = self._populate_asserts() self._populate_methods() -- cgit v1.2.3 From b27bc3c8ca942df039593fef7a20db3aee3c1502 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 11 Nov 2015 17:48:33 +0000 Subject: sched: fix documentation c9211760aa45 ("sched: SchedMultiAssert: Add missing pids argument") called the pids argument "pids". Fix the documentation accordingly. --- bart/sched/SchedMultiAssert.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index 2792079..94feb56 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -46,8 +46,8 @@ class SchedMultiAssert(object): match. 
:type execname: list, str - :param pid: The process ID of the task to be analysed - :type pid: list, int + :param pids: The process IDs of the tasks to be analysed + :type pids: list, int Consider the following processes which need to be analysed -- cgit v1.2.3 From d1f1a8e6e9236511a7194b18e4a114d1a124e6c1 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 19 Nov 2015 10:20:34 +0000 Subject: bart 1.2.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f0eac2f..7068f3e 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ from setuptools import setup, find_packages -VERSION = "1.1.0" +VERSION = "1.2.0" LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general expectation of the state of the system while targeting a single or set of heuristics. -- cgit v1.2.3 From 01554cc19f301becb9bbd165da7cadc1f57ed5a3 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 19 Nov 2015 14:56:29 +0000 Subject: common: Pass kwargs to Parser constructor for grammar This allows the user to change various options for the grammar, e.g. 
method for reindexing, handling of NaNs etc Signed-off-by: Kapileshwar Singh --- bart/common/Analyzer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py index 80c71a4..f23ea26 100644 --- a/bart/common/Analyzer.py +++ b/bart/common/Analyzer.py @@ -37,8 +37,8 @@ class Analyzer(object): :type config: dict """ - def __init__(self, data, config, topology=None): - self._parser = Parser(data, config, topology) + def __init__(self, data, config, **kwargs): + self._parser = Parser(data, config, **kwargs) def assertStatement(self, statement, select=None): """Solve the statement for a boolean result -- cgit v1.2.3 From a1d6bb773dd71e0831dced5ef61ee893029f9646 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 25 Nov 2015 16:22:32 +0000 Subject: common: Add Utils Function for summation of time for contiguous values A utility function that can be used to sum all the intervals in a time series where the value was equal to an expected value contiguously. Signed-off-by: Kapileshwar Singh --- bart/common/Utils.py | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 7a1a3b4..82556bd 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -16,6 +16,10 @@ """Utility functions for sheye""" import trappy +import numpy as np + +# pylint fails to recognize numpy members. +# pylint: disable=no-member def init_run(trace): """Initialize the Run Object @@ -52,3 +56,67 @@ def select_window(series, window): selector = ((ix >= start) & (ix <= stop)) window_series = series[selector] return window_series + + +def interval_sum(series, value=None): + """A function that returns the sum of the + intervals where the value of series is equal to + the expected value. 
Consider the following time + series data + + ====== ======= + Time Value + ====== ======= + 1 0 + 2 0 + 3 1 + 4 1 + 5 1 + 6 1 + 7 0 + 8 1 + 9 0 + 10 1 + 11 1 + ====== ======= + + 1 occurs contiguously between the following indices + the series: + + - 3 to 6 + - 10 to 11 + + There for `interval_sum` for the value 1 is + + .. math:: + + (6 - 3) + (11 - 10) = 4 + + :param series: The time series data + :type series: :mod:`pandas.Series` + + :param value: The value to checked for in the series. If the + value is None, the truth value of the elements in the + series will be used + :type value: element + """ + + index = series.index + array = series.values + + time_splits = np.append(np.where(np.diff(array) != 0), len(array) - 1) + + prev = 0 + time = 0 + + for split in time_splits: + + first_val = series[index[split]] + check = (first_val == value) if value else first_val + + if check and prev != split: + time += index[split] - index[prev] + + prev = split + 1 + + return time -- cgit v1.2.3 From 17fe9d01b6771888a44d6a039b337a84c32e64e8 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Sat, 28 Nov 2015 16:10:32 +0000 Subject: tests: Add test case for interval_sum Signed-off-by: Kapileshwar Singh --- tests/test_common_utils.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 tests/test_common_utils.py diff --git a/tests/test_common_utils.py b/tests/test_common_utils.py new file mode 100644 index 0000000..ae89a72 --- /dev/null +++ b/tests/test_common_utils.py @@ -0,0 +1,28 @@ +from bart.common import Utils +import unittest +import pandas as pd + + +class TestCommonUtils(unittest.TestCase): + + def __init__(self, *args, **kwargs): + super(TestCommonUtils, self).__init__(*args, **kwargs) + + def test_interval_sum(self): + """Test Utils Function: interval_sum""" + + array = [0, 0, 1, 1, 1, 1, 0, 0] + series = pd.Series(array) + self.assertEqual(Utils.interval_sum(series, 1), 3) + + array = [False, False, True, True, True, True, 
False, False] + series = pd.Series(array) + self.assertEqual(Utils.interval_sum(series), 3) + + array = [0, 0, 1, 0, 0, 0] + series = pd.Series(array) + self.assertEqual(Utils.interval_sum(series, 1), 0) + + array = [0, 0, 1, 0, 1, 1] + series = pd.Series(array) + self.assertEqual(Utils.interval_sum(series, 1), 1) -- cgit v1.2.3 From 080b1ef4403871aa3f726902a6ec0fc06aacf3dd Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Sun, 29 Nov 2015 23:34:50 +0000 Subject: common: Add function to calculate area under curve Add a function to calculate area under a time series curve Signed-off-by: Kapileshwar Singh --- bart/common/Utils.py | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 82556bd..90e5321 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -57,6 +57,103 @@ def select_window(series, window): window_series = series[selector] return window_series +def area_under_curve(series, sign=None, method="trapz", step="post"): + """Return the area under the time series curve (Integral) + + :param series: The time series to be integrated + :type series: :mod:`pandas.Series` + + :param sign: Clip the data for the area in positive + or negative regions. Can have two values + + - `"+"` + - `"-"` + :type sign: str + + :param method: The method for area calculation. This can + be any of the integration methods supported in `numpy` + or `rect` + :type param: str + + :param step: The step behaviour for `rect` method + :type step: str + + *Rectangular Method* + + - Step: Post + + Consider the following time series data + + .. code:: + + 2 *----*----*----+ + | | + 1 | *----*----+ + | + 0 *----*----+ + 0 1 2 3 4 5 6 7 + + .. code:: + + import pandas as pd + a = [0, 0, 2, 2, 2, 1, 1] + s = pd.Series(a) + + The area under the curve is: + + .. math:: + + \sum_{k=0}^{N-1} (x_{k+1} - {x_k}) \\times f(x_k) \\\\ + (2 \\times 3) + (1 \\times 2) = 8 + + - Step: Pre + + .. 
code:: + + 2 +----*----*----* + | | + 1 | +----*----*----+ + | + 0 *----* + 0 1 2 3 4 5 6 7 + + .. code:: + + import pandas as pd + a = [0, 0, 2, 2, 2, 1, 1] + s = pd.Series(a) + + The area under the curve is: + + .. math:: + + \sum_{k=1}^{N} (x_k - x_{k-1}) \\times f(x_k) \\\\ + (2 \\times 3) + (1 \\times 3) = 9 + """ + + if sign == "+": + series = series.clip_lower(0) + elif sign == "=": + series = series.clip_upper(0) + + series = series.dropna() + + if method == "rect": + + if step == "post": + values = series.values[:-1] + elif step == "pre": + values = series.values[1:] + else: + raise ValueError("Invalid Value for step: {}".format(step)) + + return (values * np.diff(series.index)).sum() + + if hasattr(np, method): + np_integ_method = getattr(np, method) + np_integ_method(series.values, series.index) + else: + raise ValueError("Invalid method: {}".format(method)) def interval_sum(series, value=None): """A function that returns the sum of the -- cgit v1.2.3 From 9415bf5dea6532f3222a9fa1ec4c60db3e5d76cb Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Sun, 29 Nov 2015 23:36:48 +0000 Subject: tests: Add test case for area_under_curve Signed-off-by: Kapileshwar Singh --- tests/test_common_utils.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/tests/test_common_utils.py b/tests/test_common_utils.py index ae89a72..4f72325 100644 --- a/tests/test_common_utils.py +++ b/tests/test_common_utils.py @@ -26,3 +26,44 @@ class TestCommonUtils(unittest.TestCase): array = [0, 0, 1, 0, 1, 1] series = pd.Series(array) self.assertEqual(Utils.interval_sum(series, 1), 1) + + def test_area_under_curve(self): + """Test Utils function: area_under_curve""" + + array = [0, 0, 2, 2, 2, 1, 1, 1] + series = pd.Series(array) + + # Area under curve post stepping + self.assertEqual( + Utils.area_under_curve( + series, + method="rect", + step="post"), + 8) + + # Area under curve pre stepping + self.assertEqual( + Utils.area_under_curve( + 
series, + method="rect", + step="pre"), + 9) + + array = [1] + series = pd.Series(array) + + # Area under curve post stepping, edge case + self.assertEqual( + Utils.area_under_curve( + series, + method="rect", + step="post"), + 0) + + # Area under curve pre stepping, edge case + self.assertEqual( + Utils.area_under_curve( + series, + method="rect", + step="pre"), + 0) -- cgit v1.2.3 From d91f4894d64d14e832e3284bf06f771e71fa86f3 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Tue, 1 Dec 2015 14:55:02 +0000 Subject: setup: release bart 1.3.0 --- docs/api_reference/conf.py | 4 ++-- setup.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index 3b9b1d3..1c8b61e 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -79,9 +79,9 @@ author = u'Kapileshwar Singh(KP), Javi Merino' # built documents. # # The short X.Y version. -version = '1' +version = '1.3' # The full version, including alpha/beta/rc tags. -release = '1.0.0' +release = '1.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 7068f3e..87b0b21 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ from setuptools import setup, find_packages -VERSION = "1.2.0" +VERSION = "1.3.0" LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general expectation of the state of the system while targeting a single or set of heuristics. -- cgit v1.2.3 From 28417bbb0ee9afdd67ee94bbb7b95f370bee1b8c Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Tue, 1 Dec 2015 15:00:33 +0000 Subject: gitignore: ignore files generated by setuptools "python setup.py sdist" and friends generate all these folders. Ignore them. 
--- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 32e54fb..4ddc47c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ *.pyc .ipynb_checkpoints example_trace_dat* +/dist/ +/build/ +/bart_py.egg-info/ -- cgit v1.2.3 From 002578a69632d1f2dc4944136e6191be16d2a5ee Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 26 Nov 2015 11:32:31 +0000 Subject: tests: Add missing license header Signed-off-by: Kapileshwar Singh --- tests/test_common_utils.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/test_common_utils.py b/tests/test_common_utils.py index 4f72325..ff6456a 100644 --- a/tests/test_common_utils.py +++ b/tests/test_common_utils.py @@ -1,3 +1,18 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + from bart.common import Utils import unittest import pandas as pd -- cgit v1.2.3 From 5706782346edf3398b5d301ad7f0a9a608fd62f2 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Thu, 26 Nov 2015 12:15:24 +0000 Subject: tests: Add base class and trace files for testing Using the trace and utils_tests from TRAPpy Signed-off-by: Kapileshwar Singh --- tests/trace.raw.txt | 7 +++++++ tests/trace.txt | 7 +++++++ tests/utils_tests.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+) create mode 100644 tests/trace.raw.txt create mode 100644 tests/trace.txt create mode 100644 tests/utils_tests.py diff --git a/tests/trace.raw.txt b/tests/trace.raw.txt new file mode 100644 index 0000000..f66d55b --- /dev/null +++ b/tests/trace.raw.txt @@ -0,0 +1,7 @@ +version = 6 +CPU 3 is empty +CPU 4 is empty +cpus=6 + ls-4734 [002] 106439.675591: sched_switch: prev_comm=trace-cmd prev_pid=4734 prev_prio=120 prev_state=1024 next_comm=migration/2 next_pid=18 next_prio=0 + migration/2-18 [002] 106439.675613: sched_switch: prev_comm=migration/2 prev_pid=18 prev_prio=0 prev_state=1 next_comm=trace-cmd next_pid=4732 next_prio=120 + trace-cmd-4730 [001] 106439.675718: sched_switch: prev_comm=trace-cmd prev_pid=4730 prev_prio=120 prev_state=1 next_comm=trace-cmd next_pid=4729 next_prio=120 diff --git a/tests/trace.txt b/tests/trace.txt new file mode 100644 index 0000000..4fbf4c9 --- /dev/null +++ b/tests/trace.txt @@ -0,0 +1,7 @@ +version = 6 +CPU 3 is empty +CPU 4 is empty +cpus=6 + ls-4734 [002] 106439.675591: sched_switch: trace-cmd:4734 [120] R ==> migration/2:18 [0] + migration/2-18 [002] 106439.675613: sched_switch: migration/2:18 [0] S ==> trace-cmd:4732 [120] + trace-cmd-4731 [001] 106439.675698: sched_switch: trace-cmd:4731 [120] S ==> trace-cmd:4730 [120] diff --git a/tests/utils_tests.py b/tests/utils_tests.py new file mode 100644 index 0000000..63a050e --- /dev/null +++ b/tests/utils_tests.py @@ -0,0 +1,56 @@ +# Copyright 2015-2015 ARM 
Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import unittest +import os +import shutil +import subprocess +import tempfile + +TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) + + +class SetupDirectory(unittest.TestCase): + + def __init__(self, files_to_copy, *args, **kwargs): + self.files_to_copy = files_to_copy + super(SetupDirectory, self).__init__(*args, **kwargs) + + def setUp(self): + self.previous_dir = os.getcwd() + + self.out_dir = tempfile.mkdtemp() + os.chdir(self.out_dir) + + for src_fname, dst_fname in self.files_to_copy: + src_fname = os.path.join(TESTS_DIRECTORY, src_fname) + shutil.copy(src_fname, os.path.join(self.out_dir, dst_fname)) + + def tearDown(self): + os.chdir(self.previous_dir) + shutil.rmtree(self.out_dir) + + +class TestBART(SetupDirectory): + + def __init__(self, *args, **kwargs): + super(TestBART, self).__init__( + [ + ("./trace.txt", "trace.txt"), + ("./trace.raw.txt", "trace.raw.txt") + ], + *args, + **kwargs) -- cgit v1.2.3 From 261b7e50b33d19f1cdd0bad71b9a32ff782f2f44 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 2 Dec 2015 11:39:32 +0000 Subject: tests: import test_sched_assert from trappy Now that bart has a testsuite, this test can be moved from trappy here. 
--- tests/raw_trace.dat | Bin 0 -> 2437120 bytes tests/test_sched_assert.py | 70 +++++++++++++++++++++++++++++++++++++++++++++ tests/utils_tests.py | 10 +++++++ 3 files changed, 80 insertions(+) create mode 100644 tests/raw_trace.dat create mode 100644 tests/test_sched_assert.py diff --git a/tests/raw_trace.dat b/tests/raw_trace.dat new file mode 100644 index 0000000..adfb449 Binary files /dev/null and b/tests/raw_trace.dat differ diff --git a/tests/test_sched_assert.py b/tests/test_sched_assert.py new file mode 100644 index 0000000..e65a94b --- /dev/null +++ b/tests/test_sched_assert.py @@ -0,0 +1,70 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +import trappy +from trappy.stats.Topology import Topology +import unittest + +import utils_tests + +from bart.sched.SchedAssert import SchedAssert + +@unittest.skipUnless(utils_tests.trace_cmd_installed(), + "trace-cmd not installed") +class TestSchedAssert(utils_tests.SetupDirectory): + + def __init__(self, *args, **kwargs): + + self.BIG = [1,2] + self.LITTLE = [0, 3, 4, 5] + self.clusters = [self.BIG, self.LITTLE] + self.topology = Topology(clusters=self.clusters) + super(TestSchedAssert, self).__init__( + [("raw_trace.dat", "trace.dat")], + *args, + **kwargs) + + def test_get_runtime(self): + + r = trappy.Run() + # The ls process is process we are + # testing against with pre calculated + # values + process = "ls" + + # Complete duration + expected_time = 0.0034740000264719129 + s = SchedAssert(r, self.topology, execname=process) + self.assertAlmostEqual(s.getRuntime(), expected_time, places=9) + self.assertAlmostEqual(s.getRuntime(), expected_time, places=9) + + # Non Interrupted Window + window = (0.0034, 0.003525) + expected_time = 0.000125 + self.assertAlmostEqual(s.getRuntime(window=window), expected_time, + places=9) + + # Interrupted Window + window = (0.0030, 0.0032) + expected_time = 0.000166 + self.assertAlmostEqual(s.getRuntime(window=window), expected_time, + places=9) + + # A window with multiple interruptions + window = (0.0027, 0.0036) + expected_time = 0.000817 + self.assertAlmostEqual(s.getRuntime(window=window), expected_time, + places=9) diff --git a/tests/utils_tests.py b/tests/utils_tests.py index 63a050e..8216ea5 100644 --- a/tests/utils_tests.py +++ b/tests/utils_tests.py @@ -23,6 +23,16 @@ import tempfile TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) +def trace_cmd_installed(): + """Return true if trace-cmd is installed, false otherwise""" + with open(os.devnull) as devnull: + try: + subprocess.check_call(["trace-cmd", "options"], stdout=devnull) + except OSError: + return False + + return True + class 
SetupDirectory(unittest.TestCase): def __init__(self, files_to_copy, *args, **kwargs): -- cgit v1.2.3 From 70bd78e89d0edfb39bf312c510ba2d55c324c324 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 Dec 2015 02:07:19 +0000 Subject: common: Use iloc for indexing series This is not only faster but using the index can possibly result in erroneous results Signed-off-by: Kapileshwar Singh --- bart/common/Utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 90e5321..87c28ba 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -208,7 +208,7 @@ def interval_sum(series, value=None): for split in time_splits: - first_val = series[index[split]] + first_val = series.iloc[split] check = (first_val == value) if value else first_val if check and prev != split: -- cgit v1.2.3 From 18de1f6690fb8fc3c144839dbefe81757e69a052 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 Dec 2015 02:09:42 +0000 Subject: common: Missing Return statement Signed-off-by: Kapileshwar Singh --- bart/common/Utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 87c28ba..886c868 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -151,7 +151,7 @@ def area_under_curve(series, sign=None, method="trapz", step="post"): if hasattr(np, method): np_integ_method = getattr(np, method) - np_integ_method(series.values, series.index) + return np_integ_method(series.values, series.index) else: raise ValueError("Invalid method: {}".format(method)) -- cgit v1.2.3 From bb5f74788a6a16890967fb1bfc441a28e98c5d50 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 Dec 2015 02:11:10 +0000 Subject: common: Return float values from Util functions This prevents truncation when the results are used in a ratio calculation Signed-off-by: Kapileshwar Singh --- bart/common/Utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 886c868..6d70742 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -147,7 +147,7 @@ def area_under_curve(series, sign=None, method="trapz", step="post"): else: raise ValueError("Invalid Value for step: {}".format(step)) - return (values * np.diff(series.index)).sum() + return float((values * np.diff(series.index)).sum()) if hasattr(np, method): np_integ_method = getattr(np, method) @@ -216,4 +216,4 @@ def interval_sum(series, value=None): prev = split + 1 - return time + return float(time) -- cgit v1.2.3 From fb9f1e122bceb6664803f7ad495e6fe289969c3f Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 Dec 2015 15:22:26 +0000 Subject: common: Use step behaviours when calculating interval sums There are two step behaviours we need to handle for the interval_sum * post * pre Signed-off-by: Kapileshwar Singh --- bart/common/Utils.py | 92 +++++++++++++++++++++++++++++++++++++--------- tests/test_common_utils.py | 32 +++++++++++----- 2 files changed, 98 insertions(+), 26 deletions(-) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 6d70742..d9c90a8 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -155,39 +155,77 @@ def area_under_curve(series, sign=None, method="trapz", step="post"): else: raise ValueError("Invalid method: {}".format(method)) -def interval_sum(series, value=None): +def interval_sum(series, value=None, step="post"): """A function that returns the sum of the intervals where the value of series is equal to the expected value. Consider the following time - series data + series data: ====== ======= Time Value ====== ======= + 0 0 1 0 - 2 0 + 2 1 3 1 4 1 5 1 - 6 1 - 7 0 - 8 1 - 9 0 - 10 1 + 8 0 + 9 1 + 10 0 11 1 + 12 1 ====== ======= - 1 occurs contiguously between the following indices - the series: + .. note:: - - 3 to 6 - - 10 to 11 + The time/index values, in general, may not be + uniform. 
This causes difference in the + the values of :func:`interval_sum` for **step-pre** + and **step-post** behaviours - There for `interval_sum` for the value 1 is + .. code:: - .. math:: + import pandas - (6 - 3) + (11 - 10) = 4 + values = [0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1] + index = [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12] + series = pandas.Series(values, index=index) + + The :func:`interval_sum` for the value 1 is calculated differently + for **step-post** and **step-pre** behaviours as follows: + + - **Step-Post** + + + .. code:: + + 1 *----*----*----*-------------+ *----+ *----* + | | | | | + 0 *----*----+ *----+ *----+ + 0 1 2 3 4 5 6 7 8 9 10 11 12 + + .. math:: + + (8-2) + (10-9) + (12-11) = 6 + 1 + 1 = 8 + + - **Step-Pre** + + .. code:: + + 1 +----*----*----*----* +----* +----*----* + | | | | | + 0 *----* +--------------* +----* + 0 1 2 3 4 5 6 7 8 9 10 11 12 + + .. math:: + + (5-1) + (9-8) + (12-10) = 4 + 1 + 2 = 7 + + .. note:: + + The asterisks (*) on the plots above represent the values of the time + series data and these do not vary between the two step styles :param series: The time series data :type series: :mod:`pandas.Series` @@ -196,6 +234,13 @@ def interval_sum(series, value=None): value is None, the truth value of the elements in the series will be used :type value: element + + :param step: The step behaviour as described above + :: + + step="post" + step="pre + :type step: str """ index = series.index @@ -205,14 +250,27 @@ def interval_sum(series, value=None): prev = 0 time = 0 + step_post = True + + if step == "pre": + step_post = False + elif step != "post": + raise ValueError("Invalid value for step: {}".format(step)) for split in time_splits: first_val = series.iloc[split] check = (first_val == value) if value else first_val + if check: + start = prev + end = split + + if step_post: + end = split + 1 if split < len(series) - 1 else split + else: + start = prev - 1 if prev > 1 else prev - if check and prev != split: - time += index[split] - 
index[prev] + time += index[end] - index[start] prev = split + 1 diff --git a/tests/test_common_utils.py b/tests/test_common_utils.py index ff6456a..8f45fa9 100644 --- a/tests/test_common_utils.py +++ b/tests/test_common_utils.py @@ -26,21 +26,35 @@ class TestCommonUtils(unittest.TestCase): def test_interval_sum(self): """Test Utils Function: interval_sum""" - array = [0, 0, 1, 1, 1, 1, 0, 0] + # A series with a non uniform index + # Refer to the example illustrations in the + # the interval sum docs-strings which explains + # the difference between step-post and ste-pre + # calculations + values = [0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1] + index = [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12] + series = pd.Series(values, index=index) + + self.assertEqual(Utils.interval_sum(series, 1, step="post"), 8) + self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 7) + + # check left boundary + array = [1, 1, 0, 0] series = pd.Series(array) - self.assertEqual(Utils.interval_sum(series, 1), 3) - array = [False, False, True, True, True, True, False, False] - series = pd.Series(array) - self.assertEqual(Utils.interval_sum(series), 3) + self.assertEqual(Utils.interval_sum(series, 1, step="post"), 2) + self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 1) - array = [0, 0, 1, 0, 0, 0] + # check right boundary + array = [0, 0, 1, 1] series = pd.Series(array) - self.assertEqual(Utils.interval_sum(series, 1), 0) - array = [0, 0, 1, 0, 1, 1] + self.assertEqual(Utils.interval_sum(series, 1, step="post"), 1) + self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 2) + + array = [False, False, True, True, True, True, False, False] series = pd.Series(array) - self.assertEqual(Utils.interval_sum(series, 1), 1) + self.assertEqual(Utils.interval_sum(series), 4) def test_area_under_curve(self): """Test Utils function: area_under_curve""" -- cgit v1.2.3 From 226295edd2119124b0e696b80dadfc92957d3533 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 Dec 2015 02:46:31 +0000 
Subject: common: Add SignalCompare Facilitate the comparison of a pair of signals and quantify this comparison Signed-off-by: Kapileshwar Singh --- bart/common/signal.py | 252 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 252 insertions(+) create mode 100644 bart/common/signal.py diff --git a/bart/common/signal.py b/bart/common/signal.py new file mode 100644 index 0000000..f6696fb --- /dev/null +++ b/bart/common/signal.py @@ -0,0 +1,252 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +**Signals** + + - Definition + + A signal is a string representation of a TRAPpy event and the + column in the same event. The signal can be of two types: + + - *Pivoted Signal* + + A pivoted signal has a pivot specified in its event class. + This means that the signal in the event is a concatenation of different + signals which belong to different **pivot** nodes. The analysis for pivoted + signals must be done by decomposing them into pivoted signals for each node. + + For example, an even that represents the load of the CPU can be pivoted on + :code:`"cpu"` which should be a column in the event's `DataFrame` + + - *Non-Pivoted Signal* + + A non pivoted signal has an event that has no pivot value associated with it. + This probably means that signal has one component and can be analysed without + decomposing it into smaller signals. 
+ + - Representation + + The following are valid representations of a signal + + - :code:`"event_name:event_column"` + - :code:`"trappy.event.class:event_column"` + +""" + +from trappy.stats.grammar import Parser +from trappy.stats import StatConf +from bart.common.Utils import area_under_curve, interval_sum + +# pylint: disable=invalid-name +# pylint: disable=anomalous-backslash-in-string + +class SignalCompare(object): + + """ + :param data: TRAPpy Run Object + :type data: :mod:`trappy.run.Run` + + :param sig_a: The first signal + :type sig_a: str + + :param sig_b: The first signal + :type sig_b: str + + :param config: A dictionary of variables, classes + and functions that can be used in the statements + :type config: dict + + :param method: The method to be used for reindexing data + This can be one of the standard :mod:`pandas.DataFrame` + methods (eg. pad, bfill, nearest). The default is pad + or use the last valid observation. + :type method: str + + :param limit: The number of indices a value will be propagated + when reindexing. The default is None + :type limit: int + + :param fill: Whether to fill the NaNs in the data. + The default value is True. + :type fill: bool + + .. note:: + + Both the signals must have the same pivots. For example: + + - Signal A has a pivot as :code:`"cpu"` which means that + the trappy event (:mod:`trappy.base.Base`) has a pivot + parameter which is equal to :code:`"cpu"`. Then the signal B + should also have :code:`"cpu"` as it's pivot. 
+ + - Signal A and B can both have undefined or None + as their pivots + """ + + def __init__(self, data, sig_a, sig_b, **kwargs): + + self._parser = Parser( + data, + config=kwargs.pop( + "config", + None), + **kwargs) + self._a = sig_a + self._b = sig_b + self._pivot_vals, self._pivot = self._get_signal_pivots() + + # Concatenate the indices by doing any operation (say add) + self._a_data = self._parser.solve(sig_a) + self._b_data = self._parser.solve(sig_b) + + def _get_signal_pivots(self): + """Internal function to check pivot conditions and + return an intersection of pivot on the signals""" + + sig_a_info = self._parser.inspect(self._a) + sig_b_info = self._parser.inspect(self._b) + + if sig_a_info["pivot"] != sig_b_info["pivot"]: + raise RuntimeError("The pivot column for both signals" + + "should be same (%s,%s)" + % (sig_a_info["pivot"], sig_b_info["pivot"])) + + if sig_a_info["pivot"]: + pivot_vals = set( + sig_a_info["pivot_values"]).intersection(sig_b_info["pivot_values"]) + pivoted = sig_a_info["pivot"] + else: + pivot_vals = [StatConf.GRAMMAR_DEFAULT_PIVOT] + pivoted = False + + return pivot_vals, pivoted + + def conditional_compare(self, condition, **kwargs): + """Conditionally compare two signals + + The conditional comparison of signals has two components: + + - **Value Coefficient** :math:`\\alpha_{v}` which measures the difference in values of + of the two signals when the condition is true: + + .. math:: + + \\alpha_{v} = \\frac{area\_under\_curve(S_A\ |\ C(t)\ is\ true)} + {area\_under\_curve(S_B\ |\ C(t)\ is\ true)} \\\\ + + \\alpha_{v} = \\frac{\int S_A(\{t\ |\ C(t)\})dt}{\int S_B(\{t\ |\ C(t)\})dt} + + - **Time Coefficient** :math:`\\alpha_{t}` which measures the time during which the + condition holds true. + + .. 
math:: + + \\alpha_{t} = \\frac{T_{valid}}{T_{total}} + + :param condition: A condition that returns a truth value and obeys the grammar syntax + :: + + "event_x:sig_a > event_x:sig_b" + + :type condition: str + + :param method: The method for area calculation. This can + be any of the integration methods supported in `numpy` + or `rect` + :type param: str + + :param step: The step behaviour for area and time + summation calculation + :type step: str + + Consider the two signals A and B as follows: + + .. code:: + + A = [0, 0, 0, 3, 3, 0, 0, 0] + B = [0, 0, 2, 2, 2, 2, 1, 1] + + + .. code:: + + + A = xxxx + 3 *xxxx*xxxx+ B = ---- + | | + 2 *----*----*----+ + | | | + 1 | | *----*----+ + | | | + 0 *x-x-*x-x-+xxxx+ +xxxx*xxxx+ + 0 1 2 3 4 5 6 7 + + The condition: + + .. math:: + + A > B + + is valid between T=3 and T=5. Therefore, + + .. math:: + + \\alpha_v=1.5 \\\\ + \\alpha_t=\\frac{2}{7} + + :returns: There are two cases: + + - **Pivoted Signals** + :: + + { + "pivot_name" : { + "pval_1" : (v1,t1), + "pval_2" : (v2, t2) + } + } + - **Non Pivoted Signals** + + The tuple of :math:`(\\alpha_v, \\alpha_t)` + """ + + if self._pivot: + result = {self._pivot: {}} + + mask = self._parser.solve(condition) + step = kwargs.get("step", "post") + + for pivot_val in self._pivot_vals: + + a_piv = self._a_data[pivot_val] + b_piv = self._b_data[pivot_val] + + area = area_under_curve(a_piv[mask[pivot_val]], **kwargs) + try: + area /= area_under_curve(b_piv[mask[pivot_val]], **kwargs) + except ZeroDivisionError: + area = float("nan") + + duration = min(a_piv.last_valid_index(), b_piv.last_valid_index()) + duration -= max(a_piv.first_valid_index(), + b_piv.first_valid_index()) + duration = interval_sum(mask[pivot_val], step=step) / duration + + if self._pivot: + result[self._pivot][pivot_val] = area, duration + else: + result = area, duration + + return result -- cgit v1.2.3 From d3c6845e83514f59f421d042cab9cf8fa817e33c Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 
Dec 2015 02:56:17 +0000 Subject: tests: Add tests for common.signal.SignalCompare Signed-off-by: Kapileshwar Singh --- tests/test_signal.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 tests/test_signal.py diff --git a/tests/test_signal.py b/tests/test_signal.py new file mode 100644 index 0000000..369086c --- /dev/null +++ b/tests/test_signal.py @@ -0,0 +1,43 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import pandas as pd +import trappy +from utils_tests import TestBART +from bart.common.signal import SignalCompare + + +class TestSignalCompare(TestBART): + + def __init__(self, *args, **kwargs): + super(TestSignalCompare, self).__init__(*args, **kwargs) + + def test_conditional_compare(self): + """Test conditional_compare""" + + A = [0, 0, 0, 3, 3, 0, 0, 0] + B = [0, 0, 2, 2, 2, 2, 1, 1] + + run = trappy.Run(".", events=["event"]) + df = pd.DataFrame({"A": A, "B": B}) + run.event.data_frame = df + + s = SignalCompare(run, "event:A", "event:B") + expected = (1.5, 2.0 / 7) + self.assertEqual( + s.conditional_compare( + "event:A > event:B", + method="rect"), + expected) -- cgit v1.2.3 From 6f612cba3ef9ee574401bfca02625b6f2c3c5824 Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 Dec 2015 02:48:19 +0000 Subject: common: Add specialized signal comparison functions Two functions added: * Compare overshooting of A w.r.t B * Compare undershooting of A w.r.t B Signed-off-by: Kapileshwar Singh --- bart/common/signal.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ tests/test_signal.py | 4 ++++ 2 files changed, 52 insertions(+) diff --git a/bart/common/signal.py b/bart/common/signal.py index f6696fb..13b1644 100644 --- a/bart/common/signal.py +++ b/bart/common/signal.py @@ -250,3 +250,51 @@ class SignalCompare(object): result = area, duration return result + + def get_overshoot(self, **kwargs): + """Special case for :func:`conditional_compare` + where the condition is: + :: + + "sig_a > sig_b" + + :param method: The method for area calculation. This can + be any of the integration methods supported in `numpy` + or `rect` + :type param: str + + :param step: The step behaviour for calculation of area + and time summation + :type step: str + + .. 
seealso:: + + :func:`conditional_compare` + """ + + condition = " ".join([self._a, ">", self._b]) + return self.conditional_compare(condition, **kwargs) + + def get_undershoot(self, **kwargs): + """Special case for :func:`conditional_compare` + where the condition is: + :: + + "sig_a < sig_b" + + :param method: The method for area calculation. This can + be any of the integration methods supported in `numpy` + or `rect` + :type param: str + + :param step: The step behaviour for calculation of area + and time summation + :type step: str + + .. seealso:: + + :func:`conditional_compare` + """ + + condition = " ".join([self._a, "<", self._b]) + return self.conditional_compare(condition, **kwargs) diff --git a/tests/test_signal.py b/tests/test_signal.py index 369086c..d8c98b8 100644 --- a/tests/test_signal.py +++ b/tests/test_signal.py @@ -27,6 +27,10 @@ class TestSignalCompare(TestBART): def test_conditional_compare(self): """Test conditional_compare""" + # Refer to the example in + # bart.common.signal.SignalCompare.conditional_compare + # doc-strings which explains the calculation for the + # data set below A = [0, 0, 0, 3, 3, 0, 0, 0] B = [0, 0, 2, 2, 2, 2, 1, 1] -- cgit v1.2.3 From cf6db4e4c2979cc684248b24eae98b2f06c9294c Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Wed, 2 Dec 2015 16:28:37 +0000 Subject: tests: Add tests for conditional_comparison special cases Signed-off-by: Kapileshwar Singh --- tests/test_signal.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/tests/test_signal.py b/tests/test_signal.py index d8c98b8..fa302d4 100644 --- a/tests/test_signal.py +++ b/tests/test_signal.py @@ -17,6 +17,7 @@ import pandas as pd import trappy from utils_tests import TestBART from bart.common.signal import SignalCompare +import numpy as np class TestSignalCompare(TestBART): @@ -45,3 +46,59 @@ class TestSignalCompare(TestBART): "event:A > event:B", method="rect"), expected) + + def 
test_get_overshoot(self): + """Test get_overshoot""" + + A = [0, 0, 0, 3, 3, 0, 0, 0] + B = [0, 0, 2, 2, 2, 2, 1, 1] + + run = trappy.Run(".", events=["event"]) + df = pd.DataFrame({"A": A, "B": B}) + run.event.data_frame = df + + s = SignalCompare(run, "event:A", "event:B") + expected = (1.5, 2.0 / 7) + self.assertEqual( + s.get_overshoot(method="rect"), + expected) + + A = [0, 0, 0, 1, 1, 0, 0, 0] + B = [0, 0, 2, 2, 2, 2, 1, 1] + + df = pd.DataFrame({"A": A, "B": B}) + run.event.data_frame = df + s = SignalCompare(run, "event:A", "event:B") + + expected = (float("nan"), 0.0) + result = s.get_overshoot(method="rect") + self.assertTrue(np.isnan(result[0])) + self.assertEqual(result[1], expected[1]) + + def test_get_undershoot(self): + """Test get_undershoot""" + + A = [0, 0, 0, 1, 1, 1, 1, 1] + B = [2, 2, 2, 2, 2, 2, 2, 2] + + run = trappy.Run(".", events=["event"]) + df = pd.DataFrame({"A": A, "B": B}) + run.event.data_frame = df + + s = SignalCompare(run, "event:A", "event:B") + expected = (4.0 / 14.0, 1.0) + self.assertEqual( + s.get_undershoot(method="rect"), + expected) + + A = [3, 3, 3, 3, 3, 3, 3, 3] + B = [2, 2, 2, 2, 2, 2, 1, 1] + + df = pd.DataFrame({"A": A, "B": B}) + run.event.data_frame = df + s = SignalCompare(run, "event:A", "event:B") + + expected = (float("nan"), 0.0) + result = s.get_undershoot(method="rect") + self.assertTrue(np.isnan(result[0])) + self.assertEqual(result[1], expected[1]) -- cgit v1.2.3 From 6f3f68e0b52589806d5b89b2eaa754f411433d3a Mon Sep 17 00:00:00 2001 From: Kapileshwar Singh Date: Fri, 4 Dec 2015 16:55:49 +0000 Subject: docs: Update URL path for MathJax This is to avoid the error: was loaded over HTTPS, but requested an insecure script: 'http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'. This request has been blocked; the content must be served over HTTPS. 
Signed-off-by: Kapileshwar Singh --- docs/api_reference/conf.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index 1c8b61e..cc75127 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -55,6 +55,9 @@ extensions = [ 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] +# Update MathJax path to use the cdnjs using HTTPS +mathjax_path = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML" + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] -- cgit v1.2.3 From 8f65a403eead2df535a006c901264f6f6454eb61 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 7 Dec 2015 14:54:13 +0000 Subject: bart: grow your own listify bart imports listify from trappy.plotter.utils. trappy.plotter requires matplotlib, which makes bart require matplotlib. For listify. Get your own version of listify to avoid the cross-import. --- bart/common/Utils.py | 10 ++++++++++ bart/sched/SchedAssert.py | 3 +-- bart/sched/SchedMatrix.py | 3 +-- bart/sched/SchedMultiAssert.py | 3 +-- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index d9c90a8..137bf73 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -21,6 +21,16 @@ import numpy as np # pylint fails to recognize numpy members. # pylint: disable=no-member +def listify(to_select): + """Utitlity function to handle both single and + list inputs + """ + + if not isinstance(to_select, list): + to_select = [to_select] + + return to_select + def init_run(trace): """Initialize the Run Object diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index b3cadfc..f9d19f4 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -22,7 +22,6 @@ to aggregate statistics over processor hierarchies. 
import trappy import itertools import math -from trappy.plotter.Utils import listify from trappy.stats.Aggregator import MultiTriggerAggregator from trappy.stats import SchedConf as sconf from bart.common import Utils @@ -609,7 +608,7 @@ class SchedAssert(object): """ first_cpu = self.getFirstCpu(window=window) - cpus = listify(cpus) + cpus = Utils.listify(cpus) return first_cpu in cpus def generate_events(self, level, start_id=0, window=None): diff --git a/bart/sched/SchedMatrix.py b/bart/sched/SchedMatrix.py index 3a91fae..a01ad71 100755 --- a/bart/sched/SchedMatrix.py +++ b/bart/sched/SchedMatrix.py @@ -70,7 +70,6 @@ import trappy import numpy as np from trappy.stats.Aggregator import MultiTriggerAggregator from trappy.stats.Correlator import Correlator -from trappy.plotter.Utils import listify from trappy.stats import SchedConf as sconf from bart.common import Utils @@ -156,7 +155,7 @@ class SchedMatrix(object): run = Utils.init_run(trace) reference_run = Utils.init_run(reference_trace) - self._execnames = listify(execnames) + self._execnames = Utils.listify(execnames) self._reference_pids = self._populate_pids(reference_run) self._pids = self._populate_pids(run) self._dimension = len(self._pids) diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index 94feb56..94404d7 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -20,7 +20,6 @@ import re import inspect import trappy from trappy.stats import SchedConf as sconf -from trappy.plotter.Utils import listify from bart.sched.SchedAssert import SchedAssert from bart.common import Utils @@ -144,7 +143,7 @@ class SchedMultiAssert(object): if execnames and pids: raise ValueError('Either pids or execnames must be specified') if execnames: - self._execnames = listify(execnames) + self._execnames = Utils.listify(execnames) self._pids = self._populate_pids() elif pids: self._pids = pids -- cgit v1.2.3 From d437831e4e605bcc37d8ab80d12c8ae395f2b6aa Mon Sep 17 
00:00:00 2001 From: Kapileshwar Singh Date: Fri, 11 Dec 2015 16:01:04 +0000 Subject: sched: functions: Moved from trappy.stats.SchedConf Update the respective usage of SchedConf to that from bart.sched.functions Signed-off-by: Kapileshwar Singh --- bart/sched/SchedAssert.py | 40 +-- bart/sched/SchedMatrix.py | 14 +- bart/sched/SchedMultiAssert.py | 6 +- bart/sched/functions.py | 597 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 627 insertions(+), 30 deletions(-) create mode 100644 bart/sched/functions.py diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index f9d19f4..92dbc4a 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -23,7 +23,7 @@ import trappy import itertools import math from trappy.stats.Aggregator import MultiTriggerAggregator -from trappy.stats import SchedConf as sconf +from bart.sched import functions as sched_funcs from bart.common import Utils import numpy as np @@ -77,7 +77,7 @@ class SchedAssert(object): self._pid = self._validate_pid(pid) self._aggs = {} self._topology = topology - self._triggers = sconf.sched_triggers(self._run, self._pid, + self._triggers = sched_funcs.sched_triggers(self._run, self._pid, trappy.sched.SchedSwitch) self.name = "{}-{}".format(self.execname, self._pid) @@ -85,7 +85,7 @@ class SchedAssert(object): """Validate the passed pid argument""" if not pid: - pids = sconf.get_pids_for_process(self._run, + pids = sched_funcs.get_pids_for_process(self._run, self.execname) if len(pids) != 1: @@ -98,7 +98,7 @@ class SchedAssert(object): elif self.execname: - pids = sconf.get_pids_for_process(self._run, + pids = sched_funcs.get_pids_for_process(self._run, self.execname) if pid not in pids: raise RuntimeError( @@ -106,7 +106,7 @@ class SchedAssert(object): pid, self.execname)) else: - self.execname = sconf.get_task_name(self._run, pid) + self.execname = sched_funcs.get_task_name(self._run, pid) return pid @@ -172,7 +172,7 @@ class SchedAssert(object): # Get the index of the 
node in the level node_index = self._topology.get_index(level, node) - agg = self._aggregator(sconf.residency_sum) + agg = self._aggregator(sched_funcs.residency_sum) level_result = agg.aggregate(level=level, window=window) node_value = level_result[node_index] @@ -243,8 +243,8 @@ class SchedAssert(object): :return: The first time the task ran across all the CPUs """ - agg = self._aggregator(sconf.first_time) - result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) + agg = self._aggregator(sched_funcs.first_time) + result = agg.aggregate(level="all", value=sched_funcs.TASK_RUNNING) return min(result[0]) def getEndTime(self): @@ -253,9 +253,9 @@ class SchedAssert(object): all the CPUs """ - agg = self._aggregator(sconf.first_time) - agg = self._aggregator(sconf.last_time) - result = agg.aggregate(level="all", value=sconf.TASK_RUNNING) + agg = self._aggregator(sched_funcs.first_time) + agg = self._aggregator(sched_funcs.last_time) + result = agg.aggregate(level="all", value=sched_funcs.TASK_RUNNING) return max(result[0]) def _relax_switch_window(self, series, direction, window): @@ -277,8 +277,8 @@ class SchedAssert(object): even in the extended window """ - series = series[series == sconf.TASK_RUNNING] - w_series = sconf.select_window(series, window) + series = series[series == sched_funcs.TASK_RUNNING] + w_series = sched_funcs.select_window(series, window) start, stop = window if direction == "left": @@ -286,7 +286,7 @@ class SchedAssert(object): return w_series.index.values[-1] else: start_time = self.getStartTime() - w_series = sconf.select_window( + w_series = sched_funcs.select_window( series, window=( start_time, @@ -302,7 +302,7 @@ class SchedAssert(object): return w_series.index.values[0] else: end_time = self.getEndTime() - w_series = sconf.select_window(series, window=(stop, end_time)) + w_series = sched_funcs.select_window(series, window=(stop, end_time)) if not len(w_series): return None @@ -344,7 +344,7 @@ class SchedAssert(object): 
from_node_index = self._topology.get_index(level, from_node) to_node_index = self._topology.get_index(level, to_node) - agg = self._aggregator(sconf.csum) + agg = self._aggregator(sched_funcs.csum) level_result = agg.aggregate(level=level) from_node_result = level_result[from_node_index] @@ -379,7 +379,7 @@ class SchedAssert(object): .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertRuntime` """ - agg = self._aggregator(sconf.residency_sum) + agg = self._aggregator(sched_funcs.residency_sum) run_time = agg.aggregate(level="all", window=window)[0] if percent: @@ -470,7 +470,7 @@ class SchedAssert(object): .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertPeriod` """ - agg = self._aggregator(sconf.period) + agg = self._aggregator(sched_funcs.period) deltas = agg.aggregate(level="all", window=window)[0] if not len(deltas): @@ -586,7 +586,7 @@ class SchedAssert(object): .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertFirstCPU` """ - agg = self._aggregator(sconf.first_cpu) + agg = self._aggregator(sched_funcs.first_cpu) result = agg.aggregate(level="cpu", window=window) result = list(itertools.chain.from_iterable(result)) @@ -619,7 +619,7 @@ class SchedAssert(object): :mod:`bart.sched.SchedMultiAssert` class for plotting data """ - agg = self._aggregator(sconf.trace_event) + agg = self._aggregator(sched_funcs.trace_event) result = agg.aggregate(level=level, window=window) events = [] diff --git a/bart/sched/SchedMatrix.py b/bart/sched/SchedMatrix.py index a01ad71..5088c7b 100755 --- a/bart/sched/SchedMatrix.py +++ b/bart/sched/SchedMatrix.py @@ -70,7 +70,7 @@ import trappy import numpy as np from trappy.stats.Aggregator import MultiTriggerAggregator from trappy.stats.Correlator import Correlator -from trappy.stats import SchedConf as sconf +from bart.sched import functions as sched_funcs from bart.common import Utils POSITIVE_TOLERANCE = 0.80 @@ -150,7 +150,7 @@ class SchedMatrix(object): trace, topology, execnames, - aggfunc=sconf.csum): 
+ aggfunc=sched_funcs.csum): run = Utils.init_run(trace) reference_run = Utils.init_run(reference_trace) @@ -171,12 +171,12 @@ class SchedMatrix(object): """Populate the qualifying PIDs from the run""" if len(self._execnames) == 1: - return sconf.get_pids_for_process(run, self._execnames[0]) + return sched_funcs.get_pids_for_process(run, self._execnames[0]) pids = [] for proc in self._execnames: - pids += sconf.get_pids_for_process(run, proc) + pids += sched_funcs.get_pids_for_process(run, proc) return list(set(pids)) @@ -190,7 +190,7 @@ class SchedMatrix(object): reference_aggs.append( MultiTriggerAggregator( - sconf.sched_triggers( + sched_funcs.sched_triggers( reference_run, self._reference_pids[idx], trappy.sched.SchedSwitch @@ -200,7 +200,7 @@ class SchedMatrix(object): aggs.append( MultiTriggerAggregator( - sconf.sched_triggers( + sched_funcs.sched_triggers( run, self._pids[idx], trappy.sched.SchedSwitch @@ -222,7 +222,7 @@ class SchedMatrix(object): corr = Correlator( ref_result, test_result, - corrfunc=sconf.binary_correlate, + corrfunc=sched_funcs.binary_correlate, filter_gaps=True) _, total = corr.correlate(level="cluster") diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index 94404d7..62de8bd 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -19,7 +19,7 @@ statistics aggregation framework""" import re import inspect import trappy -from trappy.stats import SchedConf as sconf +from bart.sched import functions as sched_funcs from bart.sched.SchedAssert import SchedAssert from bart.common import Utils @@ -167,12 +167,12 @@ class SchedMultiAssert(object): """Map the input execnames to PIDs""" if len(self._execnames) == 1: - return sconf.get_pids_for_process(self._run, self._execnames[0]) + return sched_funcs.get_pids_for_process(self._run, self._execnames[0]) pids = [] for proc in self._execnames: - pids += sconf.get_pids_for_process(self._run, proc) + pids += 
sched_funcs.get_pids_for_process(self._run, proc) return list(set(pids)) diff --git a/bart/sched/functions.py b/bart/sched/functions.py new file mode 100644 index 0000000..5353e39 --- /dev/null +++ b/bart/sched/functions.py @@ -0,0 +1,597 @@ +# Copyright 2015-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Scheduler specific Functionality for the +stats framework + +The Scheduler stats aggregation is based on a signal +which is generated by the combination of two triggers +from the events with the following parameters + +========================= ============ ============= +EVENT VALUE FILTERS +========================= ============ ============= +:func:`sched_switch` 1 next_pid +:func:`sched_switch` -1 prev_pid +========================= ============ ============= + +Both these Triggers are provided by the event +:mod:`trappy.sched.SchedSwitch` which correspond to +the :code:`sched_switch` unique word in the trace + +.. seealso:: :mod:`trappy.stats.Trigger.Trigger` + +Using the above information the following signals are +generated. 
+ +**EVENT SERIES** + +This is a combination of the two triggers as specified +above and has alternating +/- 1 values and is merely +a representation of the position in time when the process +started or stopped running on a CPU + +**RESIDENCY SERIES** + +This series is a cumulative sum of the event series and +is a representation of the continuous residency of the +process on a CPU + +The pivot for the aggregators is the CPU on which the +event occurred on. If N is the number of CPUs in the +system, N signal for each CPU are generated. These signals +can then be aggregated by specifying a Topology + +.. seealso:: :mod:`trappy.stats.Topology.Topology` +""" + +import numpy as np +from trappy.stats.Trigger import Trigger + +WINDOW_SIZE = 0.0001 +"""A control config for filter events. Some analyses +may require ignoring of small interruptions""" + +# Trigger Values +SCHED_SWITCH_IN = 1 +"""Value of the event when a task is **switch in** +or scheduled on a CPU""" +SCHED_SWITCH_OUT = -1 +"""Value of the event when a task is **switched out** +or relinquishes a CPU""" +NO_EVENT = 0 +"""Signifies no event on an event trace""" + +# Field Names +CPU_FIELD = "__cpu" +"""The column in the sched_switch event that +indicates the CPU on which the event occurred +""" +NEXT_PID_FIELD = "next_pid" +"""The column in the sched_switch event that +indicates the PID of the next process to be scheduled +""" +PREV_PID_FIELD = "prev_pid" +"""The column in the sched_switch event that +indicates the PID of the process that was scheduled +out +""" +TASK_RUNNING = 1 +"""In a residency series, a one indicates +that the task is running +""" +TASK_NOT_RUNNING = 0 +"""In a residency series, a zero indicates +that the task is not running +""" +TIME_INVAL = -1 +"""Standard Value to indicate invalid time data""" +SERIES_SANTIZED = "_sched_sanitized" +"""A memoized flag which is set when an event series +is checked for boundary conditions +""" + + +def sanitize_asymmetry(series, 
window=None): + """Sanitize the cases when a :code:`SWITCH_OUT` + happens before a :code:`SWITCH_IN`. (The case when + a process is already running before the trace started) + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + """ + + if not hasattr(series, SERIES_SANTIZED): + + events = series[series != 0] + if len(series) >= 2 and len(events): + if series.values[0] == SCHED_SWITCH_OUT: + series.values[0] = TASK_NOT_RUNNING + + elif events.values[0] == SCHED_SWITCH_OUT: + series.values[0] = SCHED_SWITCH_IN + if window: + series.index.values[0] = window[0] + + if series.values[-1] == SCHED_SWITCH_IN: + series.values[-1] = TASK_NOT_RUNNING + + elif events.values[-1] == SCHED_SWITCH_IN: + series.values[-1] = SCHED_SWITCH_OUT + if window: + series.index.values[-1] = window[1] + + # No point if the series just has one value and + # one event. We do not have sufficient data points + # for any calculation. We should Ideally never reach + # here. + elif len(series) == 1: + series.values[0] = 0 + + setattr(series, SERIES_SANTIZED, True) + + return series + + +def csum(series, window=None, filter_gaps=False): + """:func:`aggfunc` for the cumulative sum of the + input series data + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + + :param filter_gaps: If set, a process being switched out + for :mod:`bart.sched.functions.WINDOW_SIZE` is + ignored. 
This is helpful when small interruptions need + to be ignored to compare overall correlation + :type filter_gaps: bool + """ + + if filter_gaps: + series = filter_small_gaps(series) + + series = series.cumsum() + return select_window(series, window) + +def filter_small_gaps(series): + """A helper function that does filtering of gaps + in residency series < :mod:`bart.sched.functions.WINDOW_SIZE` + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + """ + + start = None + for index, value in series.iteritems(): + + if value == SCHED_SWITCH_IN: + if start == None: + continue + + if index - start < WINDOW_SIZE: + series[start] = NO_EVENT + series[index] = NO_EVENT + start = None + + if value == SCHED_SWITCH_OUT: + start = index + + return series + +def first_cpu(series, window=None): + """:func:`aggfunc` to calculate the time of + the first switch in event in the series + This is returned as a vector of unit length + so that it can be aggregated and reduced across + nodes to find the first cpu of a task + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + """ + series = select_window(series, window) + series = series[series == SCHED_SWITCH_IN] + if len(series): + return [series.index.values[0]] + else: + return [float("inf")] + +def select_window(series, window): + """Helper Function to select a portion of + pandas time series + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + """ + + if not window: + return series + + start, stop = window + ix = series.index + selector = ((ix >= start) & (ix <= stop)) + window_series = series[selector] + return window_series + +def residency_sum(series, window=None): + """:func:`aggfunc` to calculate the total + residency + + + The input series is processed for + intervals between a 
:mod:`bart.sched.functions.SCHED_SWITCH_OUT` + and :mod:`bart.sched.functions.SCHED_SWITCH_IN` to track + additive residency of a task + + .. math:: + + S_{in} = i_{1}, i_{2}...i_{N} \\\\ + S_{out} = o_{1}, o_{2}...o_{N} \\\\ + R_{total} = \sum_{k}^{N}\Delta_k = \sum_{k}^{N}(o_{k} - i_{k}) + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + + :return: A scalar float value + """ + + if not len(series): + return 0.0 + + org_series = series + series = select_window(series, window) + series = sanitize_asymmetry(series, window) + + s_in = series[series == SCHED_SWITCH_IN] + s_out = series[series == SCHED_SWITCH_OUT] + + if not (len(s_in) and len(s_out)): + try: + org_series = sanitize_asymmetry(org_series) + running = select_window(org_series.cumsum(), window) + if running.values[0] == TASK_RUNNING and running.values[-1] == TASK_RUNNING: + return window[1] - window[0] + except Exception,e: + pass + + if len(s_in) != len(s_out): + raise RuntimeError( + "Unexpected Lengths: s_in={}, s_out={}".format( + len(s_in), + len(s_out))) + else: + return np.sum(s_out.index.values - s_in.index.values) + + +def first_time(series, value, window=None): + """:func:`aggfunc` to: + + - Return the first index where the + series == value + + - If no such index is found + +inf is returned + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + + :return: A vector of Unit Length + """ + + series = select_window(series, window) + series = series[series == value] + + if not len(series): + return [float("inf")] + + return [series.index.values[0]] + + +def period(series, align="start", window=None): + """This :func:`aggfunc` returns a tuple + of the average duration between two triggers: + + - When :code:`align=start` the :code:`SCHED_IN` + trigger is used + + - When :code:`align=end` the 
:code:`SCHED_OUT` + trigger is used + + + .. math:: + + E = e_{1}, e_{2}...e_{N} \\\\ + T_p = \\frac{\sum_{j}^{\lfloor N/2 \\rfloor}(e_{2j + 1} - e_{2j})}{N} + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + + :return: + A list of deltas of successive starts/stops + of a task + + """ + + series = select_window(series, window) + series = sanitize_asymmetry(series, window) + + if align == "start": + series = series[series == SCHED_SWITCH_IN] + elif align == "end": + series = series[series == SCHED_SWITCH_OUT] + + if len(series) % 2 == 0: + series = series[:1] + + if not len(series): + return [] + + return list(np.diff(series.index.values)) + +def last_time(series, value, window=None): + """:func:`aggfunc` to: + + - The last index where the + series == value + + - If no such index is found + :mod:`bart.sched.functions.TIME_INVAL` + is returned + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + + :return: A vector of Unit Length + """ + + series = select_window(series, window) + series = series[series == value] + if not len(series): + return [TIME_INVAL] + + return [series.index.values[-1]] + + +def binary_correlate(series_x, series_y): + """Helper function to Correlate binary Data + + Both the series should have same indices + + For binary time series data: + + .. 
math:: + + \\alpha_{corr} = \\frac{N_{agree} - N_{disagree}}{N} + + :param series_x: First time Series data + :type series_x: :mod:`pandas.Series` + + :param series_y: Second time Series data + :type series_y: :mod:`pandas.Series` + """ + + if len(series_x) != len(series_y): + raise ValueError("Cannot compute binary correlation for \ + unequal vectors") + + agree = len(series_x[series_x == series_y]) + disagree = len(series_x[series_x != series_y]) + + return (agree - disagree) / float(len(series_x)) + +def get_pids_for_process(run, execname, cls=None): + """Get the PIDs for a given process + + :param run: A run object with a sched_switch + event + :type run: :mod:`trappy.run.Run` + + :param execname: The name of the process + :type execname: str + + :param cls: The SchedSwitch event class (required if + a different event is to be used) + :type cls: :mod:`trappy.base.Base` + + :return: The set of PIDs for the execname + """ + + if not cls: + try: + df = run.sched_switch.data_frame + except AttributeError: + raise ValueError("SchedSwitch event not found in run") + else: + event = getattr(run, cls.name) + df = event.data_frame + + mask = df["next_comm"].apply(lambda x : True if x.startswith(execname) else False) + return list(np.unique(df[mask]["next_pid"].values)) + +def get_task_name(run, pid, cls=None): + """Returns the execname for pid + + :param run: A run object with a sched_switch + event + :type run: :mod:`trappy.run.Run` + + :param pid: The PID of the process + :type pid: int + + :param cls: The SchedSwitch event class (required if + a different event is to be used) + :type cls: :mod:`trappy.base.Base` + + :return: The execname for the PID + """ + + if not cls: + try: + df = run.sched_switch.data_frame + except AttributeError: + raise ValueError("SchedSwitch event not found in run") + else: + event = getattr(run, cls.name) + df = event.data_frame + + df = df[df["next_pid"] == pid] + if not len(df): + return "" + else: + return df["next_comm"].values[0] + 
+def sched_triggers(run, pid, sched_switch_class): + """Returns the list of sched_switch triggers + + :param run: A run object with a sched_switch + event + :type run: :mod:`trappy.run.Run` + + :param pid: The PID of the associated process + :type pid: int + + :param sched_switch_class: The SchedSwitch event class + :type sched_switch_class: :mod:`trappy.base.Base` + + :return: List of triggers, such that + :: + + triggers[0] = switch_in_trigger + triggers[1] = switch_out_trigger + """ + + if not hasattr(run, "sched_switch"): + raise ValueError("SchedSwitch event not found in run") + + triggers = [] + triggers.append(sched_switch_in_trigger(run, pid, sched_switch_class)) + triggers.append(sched_switch_out_trigger(run, pid, sched_switch_class)) + return triggers + +def sched_switch_in_trigger(run, pid, sched_switch_class): + """ + :param run: A run object with a sched_switch + event + :type run: :mod:`trappy.run.Run` + + :param pid: The PID of the associated process + :type pid: int + + :param sched_switch_class: The SchedSwitch event class + :type sched_switch_class: :mod:`trappy.base.Base` + + :return: :mod:`trappy.stats.Trigger.Trigger` on + the SchedSwitch: IN for the given PID + """ + + task_in = {} + task_in[NEXT_PID_FIELD] = pid + + return Trigger(run, + sched_switch_class, # trappy Event Class + task_in, # Filter Dictionary + SCHED_SWITCH_IN, # Trigger Value + CPU_FIELD) # Primary Pivot + +def sched_switch_out_trigger(run, pid, sched_switch_class): + """ + :param run: A run object with a sched_switch + event + :type run: :mod:`trappy.run.Run` + + :param pid: The PID of the associated process + :type pid: int + + :param sched_switch_class: The SchedSwitch event class + :type sched_switch_class: :mod:`trappy.base.Base` + + :return: :mod:`trappy.stats.Trigger.Trigger` on + the SchedSwitch: OUT for the given PID + """ + + task_out = {} + task_out[PREV_PID_FIELD] = pid + + return Trigger(run, + sched_switch_class, # trappy Event Class + task_out, # Filter 
Dictionary + SCHED_SWITCH_OUT, # Trigger Value + CPU_FIELD) # Primary Pivot + + +def trace_event(series, window=None): + """ + :func:`aggfunc` to be used for plotting + the process residency data using + :mod:`trappy.plotter.EventPlot` + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + + :return: A list of events + of the type: + :: + + [ + [start_time_1, stop_time_1], + [start_time_2, stop_time_2], + # + # + [start_time_N, stop_time_N], + ] + """ + rects = [] + series = select_window(series, window) + series = sanitize_asymmetry(series, window) + + s_in = series[series == SCHED_SWITCH_IN] + s_out = series[series == SCHED_SWITCH_OUT] + + if not len(s_in): + return rects + + if len(s_in) != len(s_out): + raise RuntimeError( + "Unexpected Lengths: s_in={}, s_out={}".format( + len(s_in), + len(s_out))) + + return np.column_stack((s_in.index.values, s_out.index.values)) -- cgit v1.2.3 From c8b7cb8b69d7563b06e72a1abe9b91ee8e51711a Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Tue, 5 Jan 2016 16:13:08 +0000 Subject: bart: update copyright to 2016 --- bart/__init__.py | 2 +- bart/common/Analyzer.py | 2 +- bart/common/Utils.py | 2 +- bart/common/__init__.py | 2 +- bart/common/signal.py | 2 +- bart/sched/SchedAssert.py | 2 +- bart/sched/SchedMatrix.py | 2 +- bart/sched/SchedMultiAssert.py | 2 +- bart/sched/__init__.py | 2 +- bart/sched/functions.py | 2 +- bart/thermal/ThermalAssert.py | 2 +- bart/thermal/__init__.py | 2 +- docs/api_reference/conf.py | 2 +- docs/examples/thermal.py | 2 +- setup.py | 2 +- tests/test_common_utils.py | 2 +- tests/test_sched_assert.py | 2 +- tests/test_signal.py | 2 +- tests/utils_tests.py | 2 +- 19 files changed, 19 insertions(+), 19 deletions(-) diff --git a/bart/__init__.py b/bart/__init__.py index b5bada9..c031ebb 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 
2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py index f23ea26..51194d7 100644 --- a/bart/common/Analyzer.py +++ b/bart/common/Analyzer.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 137bf73..3ab99b6 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/common/__init__.py b/bart/common/__init__.py index ea66887..f42522b 100644 --- a/bart/common/__init__.py +++ b/bart/common/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/common/signal.py b/bart/common/signal.py index 13b1644..6860144 100644 --- a/bart/common/signal.py +++ b/bart/common/signal.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index 92dbc4a..bf141c7 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/sched/SchedMatrix.py b/bart/sched/SchedMatrix.py index 5088c7b..3c0a4ca 100755 --- a/bart/sched/SchedMatrix.py +++ b/bart/sched/SchedMatrix.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index 62de8bd..492590a 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/sched/__init__.py b/bart/sched/__init__.py index c391ecb..68133da 100644 --- a/bart/sched/__init__.py +++ b/bart/sched/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/sched/functions.py b/bart/sched/functions.py index 5353e39..7966f45 100644 --- a/bart/sched/functions.py +++ b/bart/sched/functions.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/bart/thermal/ThermalAssert.py b/bart/thermal/ThermalAssert.py index 1dd77a2..e7c3431 100644 --- a/bart/thermal/ThermalAssert.py +++ b/bart/thermal/ThermalAssert.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/bart/thermal/__init__.py b/bart/thermal/__init__.py index c9baee0..6eefbc2 100644 --- a/bart/thermal/__init__.py +++ b/bart/thermal/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index cc75127..583a3fd 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/docs/examples/thermal.py b/docs/examples/thermal.py index 33c2afb..2b3e0bb 100644 --- a/docs/examples/thermal.py +++ b/docs/examples/thermal.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/setup.py b/setup.py index 87b0b21..4534060 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/test_common_utils.py b/tests/test_common_utils.py index 8f45fa9..56398be 100644 --- a/tests/test_common_utils.py +++ b/tests/test_common_utils.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/test_sched_assert.py b/tests/test_sched_assert.py index e65a94b..485326d 100644 --- a/tests/test_sched_assert.py +++ b/tests/test_sched_assert.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/test_signal.py b/tests/test_signal.py index fa302d4..746f86c 100644 --- a/tests/test_signal.py +++ b/tests/test_signal.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/utils_tests.py b/tests/utils_tests.py index 8216ea5..6dadca1 100644 --- a/tests/utils_tests.py +++ b/tests/utils_tests.py @@ -1,4 +1,4 @@ -# Copyright 2015-2015 ARM Limited +# Copyright 2015-2016 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
-- cgit v1.2.3 From a5c1fb04cfdfc963ce6c26e2102d3e4793481d24 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Tue, 5 Jan 2016 16:13:42 +0000 Subject: setup: release bart 1.4.0 --- docs/api_reference/conf.py | 4 ++-- setup.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index 583a3fd..3dcfbb3 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -82,9 +82,9 @@ author = u'Kapileshwar Singh(KP), Javi Merino' # built documents. # # The short X.Y version. -version = '1.3' +version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.3.0' +release = '1.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 4534060..26957c0 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ from setuptools import setup, find_packages -VERSION = "1.3.0" +VERSION = "1.4.0" LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general expectation of the state of the system while targeting a single or set of heuristics. 
-- cgit v1.2.3 From b0718adc6d4b365527cb835a2200f5a4515fe941 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 14 Jan 2016 18:38:20 +0000 Subject: tests: add a basic assertStatement() check --- tests/test_common_utils.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/test_common_utils.py b/tests/test_common_utils.py index 56398be..9226ec3 100644 --- a/tests/test_common_utils.py +++ b/tests/test_common_utils.py @@ -14,8 +14,10 @@ # from bart.common import Utils +from bart.common.Analyzer import Analyzer import unittest import pandas as pd +import trappy class TestCommonUtils(unittest.TestCase): @@ -96,3 +98,18 @@ class TestCommonUtils(unittest.TestCase): method="rect", step="pre"), 0) + + +class TestAnalyzer(unittest.TestCase): + + def test_assert_statement_bool(self): + """Check that asssertStatement() works with a simple boolean case""" + + rolls_dfr = pd.DataFrame({"results": [1, 3, 2, 6, 2, 4]}) + trace = trappy.BareTrace() + trace.add_parsed_event("dice_rolls", rolls_dfr) + config = {"MAX_DICE_NUMBER": 6} + + t = Analyzer(trace, config) + statement = "numpy.max(dice_rolls:results) <= MAX_DICE_NUMBER" + self.assertTrue(t.assertStatement(statement, select=0)) -- cgit v1.2.3 From 5af9d234eb3445a36c34695c5d1b1bd8b88d5c6f Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 14 Jan 2016 18:57:24 +0000 Subject: Analyzer: assert when the parsed statement returns a dataframe of bools Sometimes it's useful to assert things like: "event:column == 3". Teach assertStatement() handle the case where the result of parsing a statement is a dataframe of bools. --- bart/common/Analyzer.py | 11 ++++++----- tests/test_common_utils.py | 15 +++++++++++++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py index 51194d7..d9dc74d 100644 --- a/bart/common/Analyzer.py +++ b/bart/common/Analyzer.py @@ -22,6 +22,7 @@ implemented yet. 
from trappy.stats.grammar import Parser import warnings import numpy as np +import pandas as pd # pylint: disable=invalid-name @@ -56,12 +57,12 @@ class Analyzer(object): result = self.getStatement(statement, select=select) - # pylint: disable=no-member - if not (isinstance(result, bool) or isinstance(result, np.bool_)): - warnings.warn( - "solution of {} is not an instance of bool".format(statement)) + if isinstance(result, pd.DataFrame): + result = result.all().all() + elif not(isinstance(result, bool) or isinstance(result, np.bool_)): # pylint: disable=no-member + warnings.warn("solution of {} is not boolean".format(statement)) + return result - # pylint: enable=no-member def getStatement(self, statement, reference=False, select=None): """Evaluate the statement""" diff --git a/tests/test_common_utils.py b/tests/test_common_utils.py index 9226ec3..09b31e3 100644 --- a/tests/test_common_utils.py +++ b/tests/test_common_utils.py @@ -113,3 +113,18 @@ class TestAnalyzer(unittest.TestCase): t = Analyzer(trace, config) statement = "numpy.max(dice_rolls:results) <= MAX_DICE_NUMBER" self.assertTrue(t.assertStatement(statement, select=0)) + + def test_assert_statement_dataframe(self): + """assertStatement() works if the generated statement creates a pandas.DataFrame of bools""" + + rolls_dfr = pd.DataFrame({"results": [1, 3, 2, 6, 2, 4]}) + trace = trappy.BareTrace() + trace.add_parsed_event("dice_rolls", rolls_dfr) + config = {"MIN_DICE_NUMBER": 1, "MAX_DICE_NUMBER": 6} + t = Analyzer(trace, config) + + statement = "(dice_rolls:results <= MAX_DICE_NUMBER) & (dice_rolls:results >= MIN_DICE_NUMBER)" + self.assertTrue(t.assertStatement(statement)) + + statement = "dice_rolls:results == 3" + self.assertFalse(t.assertStatement(statement)) -- cgit v1.2.3 From 7881a53f3898b121e04de245a80f24976ae198b6 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 27 Jan 2016 18:12:01 +0000 Subject: bart: Use trappy.FTrace instead of trappy.Run As of c26a32321053 ("ftrace: rename Run to 
FTrace"), trappy deprecated trappy.Run in favour of trappy.FTrace. Move bart to use the new interface. --- bart/common/Analyzer.py | 4 +-- bart/common/Utils.py | 10 +++--- bart/common/signal.py | 4 +-- bart/sched/SchedAssert.py | 24 +++++++------- bart/sched/SchedMatrix.py | 12 +++---- bart/sched/SchedMultiAssert.py | 30 +++++++++--------- bart/sched/functions.py | 54 ++++++++++++++++---------------- bart/thermal/ThermalAssert.py | 14 ++++----- docs/examples/thermal.py | 4 +-- docs/notebooks/sched/SchedDeadline.ipynb | 12 +++---- docs/notebooks/thermal/Thermal.ipynb | 10 +++--- tests/test_sched_assert.py | 2 +- tests/test_signal.py | 26 +++++++-------- 13 files changed, 103 insertions(+), 103 deletions(-) diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py index d9dc74d..7bb55c9 100644 --- a/bart/common/Analyzer.py +++ b/bart/common/Analyzer.py @@ -30,8 +30,8 @@ import pandas as pd class Analyzer(object): """ - :param data: TRAPpy Run Object - :type data: :mod:`trappy.run.Run` + :param data: TRAPpy FTrace Object + :type data: :mod:`trappy.ftrace.FTrace` :param config: A dictionary of variables, classes and functions that can be used in the statements diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 3ab99b6..fdfd1e0 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -31,18 +31,18 @@ def listify(to_select): return to_select -def init_run(trace): - """Initialize the Run Object +def init_ftrace(trace): + """Initialize the FTrace Object :param trace: Path for the trace file or a trace object - :type trace: str, :mod:`trappy.run.Run` + :type trace: str, :mod:`trappy.ftrace.FTrace` """ if isinstance(trace, basestring): - return trappy.Run(trace) + return trappy.FTrace(trace) - elif isinstance(trace, trappy.Run): + elif isinstance(trace, trappy.FTrace): return trace raise ValueError("Invalid trace Object") diff --git a/bart/common/signal.py b/bart/common/signal.py index 6860144..acf7091 100644 --- a/bart/common/signal.py +++ 
b/bart/common/signal.py @@ -56,8 +56,8 @@ from bart.common.Utils import area_under_curve, interval_sum class SignalCompare(object): """ - :param data: TRAPpy Run Object - :type data: :mod:`trappy.run.Run` + :param data: TRAPpy FTrace Object + :type data: :mod:`trappy.ftrace.FTrace` :param sig_a: The first signal :type sig_a: str diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index bf141c7..e20aecb 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -35,9 +35,9 @@ class SchedAssert(object): predefined scheduler scenarios. This does not compare parameters across runs - :param run: A single trappy.Run object - or a path that can be passed to trappy.Run - :type run: :mod:`trappy.run.Run` + :param ftrace: A single trappy.FTrace object + or a path that can be passed to trappy.FTrace + :type ftrace: :mod:`trappy.ftrace.FTrace` :param topology: A topology that describes the arrangement of CPU's on a system. This is useful for multi-cluster systems @@ -65,19 +65,19 @@ class SchedAssert(object): there are more than one processes with the same execname """ - def __init__(self, run, topology, execname=None, pid=None): + def __init__(self, ftrace, topology, execname=None, pid=None): - run = Utils.init_run(run) + ftrace = Utils.init_ftrace(ftrace) if not execname and not pid: raise ValueError("Need to specify at least one of pid or execname") self.execname = execname - self._run = run + self._ftrace = ftrace self._pid = self._validate_pid(pid) self._aggs = {} self._topology = topology - self._triggers = sched_funcs.sched_triggers(self._run, self._pid, + self._triggers = sched_funcs.sched_triggers(self._ftrace, self._pid, trappy.sched.SchedSwitch) self.name = "{}-{}".format(self.execname, self._pid) @@ -85,7 +85,7 @@ class SchedAssert(object): """Validate the passed pid argument""" if not pid: - pids = sched_funcs.get_pids_for_process(self._run, + pids = sched_funcs.get_pids_for_process(self._ftrace, self.execname) if len(pids) != 1: @@ 
-98,7 +98,7 @@ class SchedAssert(object): elif self.execname: - pids = sched_funcs.get_pids_for_process(self._run, + pids = sched_funcs.get_pids_for_process(self._ftrace, self.execname) if pid not in pids: raise RuntimeError( @@ -106,7 +106,7 @@ class SchedAssert(object): pid, self.execname)) else: - self.execname = sched_funcs.get_task_name(self._run, pid) + self.execname = sched_funcs.get_task_name(self._ftrace, pid) return pid @@ -388,7 +388,7 @@ class SchedAssert(object): begin, end = window total_time = end - begin else: - total_time = self._run.get_duration() + total_time = self._ftrace.get_duration() run_time = run_time * 100 run_time = run_time / total_time @@ -638,7 +638,7 @@ class SchedAssert(object): if not xlim: if not window: - xlim = [0, self._run.get_duration()] + xlim = [0, self._ftrace.get_duration()] else: xlim = list(window) diff --git a/bart/sched/SchedMatrix.py b/bart/sched/SchedMatrix.py index 3c0a4ca..4595469 100755 --- a/bart/sched/SchedMatrix.py +++ b/bart/sched/SchedMatrix.py @@ -82,13 +82,13 @@ POSITIVE_TOLERANCE = 0.80 class SchedMatrix(object): """ - :param reference_trace: The trace file path/run object + :param reference_trace: The trace file path/ftrace object to be used as a reference - :type reference_trace: str, :mod:`trappy.run.Run` + :type reference_trace: str, :mod:`trappy.ftrace.FTrace` - :param trace: The trace file path/run object + :param trace: The trace file path/ftrace object to be verified - :type trace: str, :mod:`trappy.run.Run` + :type trace: str, :mod:`trappy.ftrace.FTrace` :param topology: A topology that describes the arrangement of CPU's on a system. 
This is useful for multi-cluster systems @@ -152,8 +152,8 @@ class SchedMatrix(object): execnames, aggfunc=sched_funcs.csum): - run = Utils.init_run(trace) - reference_run = Utils.init_run(reference_trace) + run = Utils.init_ftrace(trace) + reference_run = Utils.init_ftrace(reference_trace) self._execnames = Utils.listify(execnames) self._reference_pids = self._populate_pids(reference_run) diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index 492590a..bc37798 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -27,9 +27,9 @@ class SchedMultiAssert(object): """This is vector assertion class built on top of :mod:`bart.sched.SchedAssert.SchedAssert` - :param run: A single trappy.Run object - or a path that can be passed to trappy.Run - :type run: :mod:`trappy.run.Run` + :param ftrace: A single trappy.FTrace object + or a path that can be passed to trappy.FTrace + :type ftrace: :mod:`trappy.ftrace.FTrace` :param topology: A topology that describes the arrangement of CPU's on a system. 
This is useful for multi-cluster systems @@ -64,17 +64,17 @@ class SchedMultiAssert(object): - Using execname prefix match :: - SchedMultiAssert(run, topology, execnames="task_") + SchedMultiAssert(ftrace, topology, execnames="task_") - Individual Task names :: - SchedMultiAssert(run, topology, execnames=["task_1", "task_2", "task_3"]) + SchedMultiAssert(ftrace, topology, execnames=["task_1", "task_2", "task_3"]) - Using Process IDs :: - SchedMultiAssert(run, topology, pids=[11, 22, 33]) + SchedMultiAssert(ftrace, topology, pids=[11, 22, 33]) All the functionality provided in :mod:`bart.sched.SchedAssert.SchedAssert` is available @@ -83,7 +83,7 @@ class SchedMultiAssert(object): For example consider the use of :func:`getDutyCycle` :: - >>> s = SchedMultiAssert(run, topology, execnames="task_") + >>> s = SchedMultiAssert(ftrace, topology, execnames="task_") >>> s.getDutyCycle(window=(start, end)) { "11": { @@ -104,7 +104,7 @@ class SchedMultiAssert(object): :: >>> import operator as op - >>> s = SchedMultiAssert(run, topology, execnames="task_") + >>> s = SchedMultiAssert(ftrace, topology, execnames="task_") >>> s.assertDutyCycle(15, op.ge, window=(start, end)) { "11": { @@ -127,7 +127,7 @@ class SchedMultiAssert(object): :: >>> import operator as op - >>> s = SchedMultiAssert(run, topology, execnames="task_") + >>> s = SchedMultiAssert(ftrace, topology, execnames="task_") >>> s.assertDutyCycle(15, op.ge, window=(start, end), rank=2) True @@ -135,9 +135,9 @@ class SchedMultiAssert(object): functionality """ - def __init__(self, run, topology, execnames=None, pids=None): + def __init__(self, ftrace, topology, execnames=None, pids=None): - self._run = Utils.init_run(run) + self._ftrace = Utils.init_ftrace(ftrace) self._topology = topology if execnames and pids: @@ -159,7 +159,7 @@ class SchedMultiAssert(object): asserts = {} for pid in self._pids: - asserts[pid] = SchedAssert(self._run, self._topology, pid=pid) + asserts[pid] = SchedAssert(self._ftrace, 
self._topology, pid=pid) return asserts @@ -167,12 +167,12 @@ class SchedMultiAssert(object): """Map the input execnames to PIDs""" if len(self._execnames) == 1: - return sched_funcs.get_pids_for_process(self._run, self._execnames[0]) + return sched_funcs.get_pids_for_process(self._ftrace, self._execnames[0]) pids = [] for proc in self._execnames: - pids += sched_funcs.get_pids_for_process(self._run, proc) + pids += sched_funcs.get_pids_for_process(self._ftrace, proc) return list(set(pids)) @@ -248,7 +248,7 @@ class SchedMultiAssert(object): if not xlim: if not window: - xlim = [0, self._run.get_duration()] + xlim = [0, self._ftrace.get_duration()] else: xlim = list(window) diff --git a/bart/sched/functions.py b/bart/sched/functions.py index 7966f45..546e8fb 100644 --- a/bart/sched/functions.py +++ b/bart/sched/functions.py @@ -414,12 +414,12 @@ def binary_correlate(series_x, series_y): return (agree - disagree) / float(len(series_x)) -def get_pids_for_process(run, execname, cls=None): +def get_pids_for_process(ftrace, execname, cls=None): """Get the PIDs for a given process - :param run: A run object with a sched_switch + :param ftrace: A ftrace object with a sched_switch event - :type run: :mod:`trappy.run.Run` + :type ftrace: :mod:`trappy.ftrace.FTrace` :param execname: The name of the process :type execname: str @@ -433,22 +433,22 @@ def get_pids_for_process(run, execname, cls=None): if not cls: try: - df = run.sched_switch.data_frame + df = ftrace.sched_switch.data_frame except AttributeError: - raise ValueError("SchedSwitch event not found in run") + raise ValueError("SchedSwitch event not found in ftrace") else: - event = getattr(run, cls.name) + event = getattr(ftrace, cls.name) df = event.data_frame mask = df["next_comm"].apply(lambda x : True if x.startswith(execname) else False) return list(np.unique(df[mask]["next_pid"].values)) -def get_task_name(run, pid, cls=None): +def get_task_name(ftrace, pid, cls=None): """Returns the execname for pid - :param 
run: A run object with a sched_switch + :param ftrace: A ftrace object with a sched_switch event - :type run: :mod:`trappy.run.Run` + :type ftrace: :mod:`trappy.ftrace.FTrace` :param pid: The PID of the process :type pid: int @@ -462,11 +462,11 @@ def get_task_name(run, pid, cls=None): if not cls: try: - df = run.sched_switch.data_frame + df = ftrace.sched_switch.data_frame except AttributeError: - raise ValueError("SchedSwitch event not found in run") + raise ValueError("SchedSwitch event not found in ftrace") else: - event = getattr(run, cls.name) + event = getattr(ftrace, cls.name) df = event.data_frame df = df[df["next_pid"] == pid] @@ -475,12 +475,12 @@ def get_task_name(run, pid, cls=None): else: return df["next_comm"].values[0] -def sched_triggers(run, pid, sched_switch_class): +def sched_triggers(ftrace, pid, sched_switch_class): """Returns the list of sched_switch triggers - :param run: A run object with a sched_switch + :param ftrace: A ftrace object with a sched_switch event - :type run: :mod:`trappy.run.Run` + :type ftrace: :mod:`trappy.ftrace.FTrace` :param pid: The PID of the associated process :type pid: int @@ -495,19 +495,19 @@ def sched_triggers(run, pid, sched_switch_class): triggers[1] = switch_out_trigger """ - if not hasattr(run, "sched_switch"): - raise ValueError("SchedSwitch event not found in run") + if not hasattr(ftrace, "sched_switch"): + raise ValueError("SchedSwitch event not found in ftrace") triggers = [] - triggers.append(sched_switch_in_trigger(run, pid, sched_switch_class)) - triggers.append(sched_switch_out_trigger(run, pid, sched_switch_class)) + triggers.append(sched_switch_in_trigger(ftrace, pid, sched_switch_class)) + triggers.append(sched_switch_out_trigger(ftrace, pid, sched_switch_class)) return triggers -def sched_switch_in_trigger(run, pid, sched_switch_class): +def sched_switch_in_trigger(ftrace, pid, sched_switch_class): """ - :param run: A run object with a sched_switch + :param ftrace: A ftrace object with a 
sched_switch event - :type run: :mod:`trappy.run.Run` + :type ftrace: :mod:`trappy.ftrace.FTrace` :param pid: The PID of the associated process :type pid: int @@ -522,17 +522,17 @@ def sched_switch_in_trigger(run, pid, sched_switch_class): task_in = {} task_in[NEXT_PID_FIELD] = pid - return Trigger(run, + return Trigger(ftrace, sched_switch_class, # trappy Event Class task_in, # Filter Dictionary SCHED_SWITCH_IN, # Trigger Value CPU_FIELD) # Primary Pivot -def sched_switch_out_trigger(run, pid, sched_switch_class): +def sched_switch_out_trigger(ftrace, pid, sched_switch_class): """ - :param run: A run object with a sched_switch + :param ftrace: A ftrace object with a sched_switch event - :type run: :mod:`trappy.run.Run` + :type ftrace: :mod:`trappy.ftrace.FTrace` :param pid: The PID of the associated process :type pid: int @@ -547,7 +547,7 @@ def sched_switch_out_trigger(run, pid, sched_switch_class): task_out = {} task_out[PREV_PID_FIELD] = pid - return Trigger(run, + return Trigger(ftrace, sched_switch_class, # trappy Event Class task_out, # Filter Dictionary SCHED_SWITCH_OUT, # Trigger Value diff --git a/bart/thermal/ThermalAssert.py b/bart/thermal/ThermalAssert.py index e7c3431..d0ffa78 100644 --- a/bart/thermal/ThermalAssert.py +++ b/bart/thermal/ThermalAssert.py @@ -25,17 +25,17 @@ import numpy as np # pylint: disable=too-many-arguments class ThermalAssert(object): - """A class that accepts a TRAPpy Run object and + """A class that accepts a TRAPpy FTrace object and provides assertions for thermal behaviours - :param run: A path to the trace file or a TRAPpy Run object - :type run: str, :mod:`trappy.run.Run` + :param ftrace: A path to the trace file or a TRAPpy FTrace object + :type ftrace: str, :mod:`trappy.ftrace.FTrace` """ - def __init__(self, run, config=None): + def __init__(self, ftrace, config=None): - self._run = Utils.init_run(run) - self._analyzer = Analyzer(self._run, config) + self._ftrace = Utils.init_ftrace(ftrace) + self._analyzer = 
Analyzer(self._ftrace, config) def getThermalResidency(self, temp_range, window, percent=False): """Return the total time spent in a given temperature range @@ -78,7 +78,7 @@ class ThermalAssert(object): if percent: result[pivot] = ( - result[pivot] * 100.0) / self._run.get_duration() + result[pivot] * 100.0) / self._ftrace.get_duration() return result diff --git a/docs/examples/thermal.py b/docs/examples/thermal.py index 2b3e0bb..8fe3e95 100644 --- a/docs/examples/thermal.py +++ b/docs/examples/thermal.py @@ -30,7 +30,7 @@ class TestThermal(unittest.TestCase): # Which then copies the required traces for analysis to # the host. trace_file = "update_a_trace_path_here" - run = trappy.Run(trace_file, "test_run") + ftrace = trappy.FTrace(trace_file, "test_run") # Define the parameters that you intend to use in the grammar config = {} @@ -48,7 +48,7 @@ class TestThermal(unittest.TestCase): cls.BIG = '000000f0' cls.LITTLE = '0000000f' cls.tz = 0 - cls.analyzer = Analyzer(run, config) + cls.analyzer = Analyzer(ftrace, config) def test_temperature_quartile(self): """Assert Temperature quartile""" diff --git a/docs/notebooks/sched/SchedDeadline.ipynb b/docs/notebooks/sched/SchedDeadline.ipynb index dac2890..95cd0e2 100644 --- a/docs/notebooks/sched/SchedDeadline.ipynb +++ b/docs/notebooks/sched/SchedDeadline.ipynb @@ -84,10 +84,10 @@ "collapsed": false, "input": [ "TRACE_FILE = os.path.join(BASE_PATH, \"yield\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", + "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n", "\n", "# Assert Period\n", - "s = SchedMultiAssert(run, topology, execnames=\"periodic_yield\")\n", + "s = SchedMultiAssert(ftrace, topology, execnames=\"periodic_yield\")\n", "if s.assertPeriod(30, between_threshold, rank=1):\n", " print \"PASS: Period\"\n", " print json.dumps(s.getPeriod(), indent=3)\n", @@ -154,8 +154,8 @@ "collapsed": false, "input": [ "TRACE_FILE = os.path.join(BASE_PATH, \"cpuhog\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", - "s 
= SchedMultiAssert(run, topology, execnames=\"cpuhog\")\n", + "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n", + "s = SchedMultiAssert(ftrace, topology, execnames=\"cpuhog\")\n", "s.plot().view()\n", "\n", "# Assert DutyCycle\n", @@ -381,8 +381,8 @@ "collapsed": false, "input": [ "TRACE_FILE = os.path.join(BASE_PATH, \"cancel_dl_timer\")\n", - "run = trappy.Run(TRACE_FILE, \"cpuhog\")\n", - "s = SchedAssert(run, topology, execname=\"cpuhog\")\n", + "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n", + "s = SchedAssert(ftrace, topology, execname=\"cpuhog\")\n", "s.plot().view()\n", "\n", "NUM_PHASES = 10\n", diff --git a/docs/notebooks/thermal/Thermal.ipynb b/docs/notebooks/thermal/Thermal.ipynb index 087d9b8..bf51929 100644 --- a/docs/notebooks/thermal/Thermal.ipynb +++ b/docs/notebooks/thermal/Thermal.ipynb @@ -106,7 +106,7 @@ "level": 1, "metadata": {}, "source": [ - "Run Object" + "FTrace Object" ] }, { @@ -115,7 +115,7 @@ "input": [ "# Create a Trace object\n", "\n", - "run = trappy.Run(TRACE_FILE, \"SomeBenchMark\")" + "ftrace = trappy.FTrace(TRACE_FILE, \"SomeBenchMark\")" ], "language": "python", "metadata": {}, @@ -137,7 +137,7 @@ "# Create an Assertion Object\n", "\n", "from bart.common.Analyzer import Analyzer\n", - "t = Analyzer(run, config)\n", + "t = Analyzer(ftrace, config)\n", "\n", "BIG = '000000f0'\n", "LITTLE = '0000000f'" @@ -349,8 +349,8 @@ "input": [ "from bart.thermal.ThermalAssert import ThermalAssert\n", "\n", - "t_assert = ThermalAssert(run)\n", - "end = run.get_duration()\n", + "t_assert = ThermalAssert(ftrace)\n", + "end = ftrace.get_duration()\n", "\n", "LOW = 0\n", "HIGH = 78000\n", diff --git a/tests/test_sched_assert.py b/tests/test_sched_assert.py index 485326d..f746a77 100644 --- a/tests/test_sched_assert.py +++ b/tests/test_sched_assert.py @@ -39,7 +39,7 @@ class TestSchedAssert(utils_tests.SetupDirectory): def test_get_runtime(self): - r = trappy.Run() + r = trappy.FTrace() # The ls process is process we are # testing against 
with pre calculated # values diff --git a/tests/test_signal.py b/tests/test_signal.py index 746f86c..889eff3 100644 --- a/tests/test_signal.py +++ b/tests/test_signal.py @@ -35,11 +35,11 @@ class TestSignalCompare(TestBART): A = [0, 0, 0, 3, 3, 0, 0, 0] B = [0, 0, 2, 2, 2, 2, 1, 1] - run = trappy.Run(".", events=["event"]) + ftrace = trappy.FTrace(".", events=["event"]) df = pd.DataFrame({"A": A, "B": B}) - run.event.data_frame = df + ftrace.event.data_frame = df - s = SignalCompare(run, "event:A", "event:B") + s = SignalCompare(ftrace, "event:A", "event:B") expected = (1.5, 2.0 / 7) self.assertEqual( s.conditional_compare( @@ -53,11 +53,11 @@ class TestSignalCompare(TestBART): A = [0, 0, 0, 3, 3, 0, 0, 0] B = [0, 0, 2, 2, 2, 2, 1, 1] - run = trappy.Run(".", events=["event"]) + ftrace = trappy.FTrace(".", events=["event"]) df = pd.DataFrame({"A": A, "B": B}) - run.event.data_frame = df + ftrace.event.data_frame = df - s = SignalCompare(run, "event:A", "event:B") + s = SignalCompare(ftrace, "event:A", "event:B") expected = (1.5, 2.0 / 7) self.assertEqual( s.get_overshoot(method="rect"), @@ -67,8 +67,8 @@ class TestSignalCompare(TestBART): B = [0, 0, 2, 2, 2, 2, 1, 1] df = pd.DataFrame({"A": A, "B": B}) - run.event.data_frame = df - s = SignalCompare(run, "event:A", "event:B") + ftrace.event.data_frame = df + s = SignalCompare(ftrace, "event:A", "event:B") expected = (float("nan"), 0.0) result = s.get_overshoot(method="rect") @@ -81,11 +81,11 @@ class TestSignalCompare(TestBART): A = [0, 0, 0, 1, 1, 1, 1, 1] B = [2, 2, 2, 2, 2, 2, 2, 2] - run = trappy.Run(".", events=["event"]) + ftrace = trappy.FTrace(".", events=["event"]) df = pd.DataFrame({"A": A, "B": B}) - run.event.data_frame = df + ftrace.event.data_frame = df - s = SignalCompare(run, "event:A", "event:B") + s = SignalCompare(ftrace, "event:A", "event:B") expected = (4.0 / 14.0, 1.0) self.assertEqual( s.get_undershoot(method="rect"), @@ -95,8 +95,8 @@ class TestSignalCompare(TestBART): B = [2, 2, 2, 2, 2, 2, 
1, 1] df = pd.DataFrame({"A": A, "B": B}) - run.event.data_frame = df - s = SignalCompare(run, "event:A", "event:B") + ftrace.event.data_frame = df + s = SignalCompare(ftrace, "event:A", "event:B") expected = (float("nan"), 0.0) result = s.get_undershoot(method="rect") -- cgit v1.2.3 From 4bb9e40eead9eeedaf0202e1d6739854c7010396 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 27 Jan 2016 18:14:59 +0000 Subject: common: make sure that the trace inherits from BareTrace, not FTrace trappy now has a BareTrace class of which FTrace is one instance of. As all that bart needs is the interface that BareTrace provides, let bart check that the provided trace is an instance of it. --- bart/common/Utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index fdfd1e0..0240c23 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -42,7 +42,7 @@ def init_ftrace(trace): if isinstance(trace, basestring): return trappy.FTrace(trace) - elif isinstance(trace, trappy.FTrace): + elif isinstance(trace, trappy.BareTrace): return trace raise ValueError("Invalid trace Object") -- cgit v1.2.3 From e3bde0ccaef4702599d968f10fbdc60a77371f8c Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 27 Jan 2016 18:23:11 +0000 Subject: tests: make test_signal use the simplified BareTrace from trappy The signal tests all use dataframes for their tests that are manually added to the trace object. trappy has a new API to do that. You can create your trace as a BareTrace and then call add_parsed_event() to add dataframes to it. 
--- tests/test_signal.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/test_signal.py b/tests/test_signal.py index 889eff3..48692a9 100644 --- a/tests/test_signal.py +++ b/tests/test_signal.py @@ -35,11 +35,11 @@ class TestSignalCompare(TestBART): A = [0, 0, 0, 3, 3, 0, 0, 0] B = [0, 0, 2, 2, 2, 2, 1, 1] - ftrace = trappy.FTrace(".", events=["event"]) + trace = trappy.BareTrace() df = pd.DataFrame({"A": A, "B": B}) - ftrace.event.data_frame = df + trace.add_parsed_event("event", df) - s = SignalCompare(ftrace, "event:A", "event:B") + s = SignalCompare(trace, "event:A", "event:B") expected = (1.5, 2.0 / 7) self.assertEqual( s.conditional_compare( @@ -53,11 +53,11 @@ class TestSignalCompare(TestBART): A = [0, 0, 0, 3, 3, 0, 0, 0] B = [0, 0, 2, 2, 2, 2, 1, 1] - ftrace = trappy.FTrace(".", events=["event"]) + trace = trappy.BareTrace() df = pd.DataFrame({"A": A, "B": B}) - ftrace.event.data_frame = df + trace.add_parsed_event("event", df) - s = SignalCompare(ftrace, "event:A", "event:B") + s = SignalCompare(trace, "event:A", "event:B") expected = (1.5, 2.0 / 7) self.assertEqual( s.get_overshoot(method="rect"), @@ -67,8 +67,8 @@ class TestSignalCompare(TestBART): B = [0, 0, 2, 2, 2, 2, 1, 1] df = pd.DataFrame({"A": A, "B": B}) - ftrace.event.data_frame = df - s = SignalCompare(ftrace, "event:A", "event:B") + trace.event.data_frame = df + s = SignalCompare(trace, "event:A", "event:B") expected = (float("nan"), 0.0) result = s.get_overshoot(method="rect") @@ -81,11 +81,11 @@ class TestSignalCompare(TestBART): A = [0, 0, 0, 1, 1, 1, 1, 1] B = [2, 2, 2, 2, 2, 2, 2, 2] - ftrace = trappy.FTrace(".", events=["event"]) + trace = trappy.BareTrace() df = pd.DataFrame({"A": A, "B": B}) - ftrace.event.data_frame = df + trace.add_parsed_event("event", df) - s = SignalCompare(ftrace, "event:A", "event:B") + s = SignalCompare(trace, "event:A", "event:B") expected = (4.0 / 14.0, 1.0) self.assertEqual( 
s.get_undershoot(method="rect"), @@ -95,8 +95,8 @@ class TestSignalCompare(TestBART): B = [2, 2, 2, 2, 2, 2, 1, 1] df = pd.DataFrame({"A": A, "B": B}) - ftrace.event.data_frame = df - s = SignalCompare(ftrace, "event:A", "event:B") + trace.event.data_frame = df + s = SignalCompare(trace, "event:A", "event:B") expected = (float("nan"), 0.0) result = s.get_undershoot(method="rect") -- cgit v1.2.3 From c24076f200d50d137569de06839fd5205cb5b3a0 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 28 Jan 2016 14:47:58 +0000 Subject: sched: SchedAssert: fix documentation for getResidency() The example of the getResidency() function is incomplete. Fix the example so that it matches the documentation. --- bart/sched/SchedAssert.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index e20aecb..b0884f3 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -134,14 +134,15 @@ class SchedAssert(object): a particular group of a topological level. For example: :: - clusters=[] - big = [1,2] - little = [0,3,4,5] + from trappy.stats.Topology import Topology - topology = Topology(clusters=clusters) + big = [1, 2] + little = [0, 3, 4, 5] - level="cluster" - node = [1,2] + topology = Topology(clusters=[little, big]) + + s = SchedAssert(trace, topology, pid=123) + s.getResidency("cluster", big) This will return the residency of the task on the big cluster. 
If percent is specified it will be normalized to the total runtime -- cgit v1.2.3 From 290caafb3675103f8ee91b71245a7aba0f2b0c2c Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 10 Feb 2016 10:21:30 +0000 Subject: setup: release bart 1.5.0 --- docs/api_reference/conf.py | 4 ++-- setup.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index 3dcfbb3..d5a9668 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -82,9 +82,9 @@ author = u'Kapileshwar Singh(KP), Javi Merino' # built documents. # # The short X.Y version. -version = '1.4' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.0' +release = '1.5.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 26957c0..69c3876 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ from setuptools import setup, find_packages -VERSION = "1.4.0" +VERSION = "1.5.0" LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general expectation of the state of the system while targeting a single or set of heuristics. -- cgit v1.2.3 From 8eb2939f8b5113cf4efe783543e5fc57bd38e032 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 10 Feb 2016 10:27:36 +0000 Subject: docs: update copyright to 2016 --- docs/api_reference/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index d5a9668..0d54455 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -74,7 +74,7 @@ master_doc = 'index' # General information about the project. project = u'BART' -copyright = u'2015, ARM Ltd.' +copyright = u'2016, ARM Ltd.' 
author = u'Kapileshwar Singh(KP), Javi Merino' # The version info for the project you're documenting, acts as replacement for -- cgit v1.2.3 From 56d75eb5abeb4947c5f54593528ea6e5ed54087c Mon Sep 17 00:00:00 2001 From: Michele Di Giorgio Date: Thu, 31 Mar 2016 10:06:40 +0100 Subject: Travis CI support for continuosly running nosetests --- .travis.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..a5cb355 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,25 @@ +language: python +python: + - "2.7" +before_install: + - sudo apt-get update -qq + - sudo apt-get install -qq libfreetype6-dev + - sudo apt-get install -qq libpng12-dev + - wget http://ftp.us.debian.org/debian/pool/main/t/trace-cmd/trace-cmd_2.4.0-1_amd64.deb + - sudo dpkg -i trace-cmd_2.4.0-1_amd64.deb +install: + - pip install matplotlib + - pip install pandas + - pip install ipython[all] + - pip install --upgrade trappy +script: nosetests +virtualenv: + system_site_packages: true +notifications: + email: + recipients: + - javi.merino@arm.com + on_success: never + on_failure: always +cache: + - pip -- cgit v1.2.3 From 9c88775ceb5505318c88a94f91e3d8e033b908f7 Mon Sep 17 00:00:00 2001 From: Michele Di Giorgio Date: Thu, 31 Mar 2016 11:23:45 +0100 Subject: README.md: improve structure of README file --- README.md | 98 ++++++++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 66 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index 8ad5f92..13d16b2 100644 --- a/README.md +++ b/README.md @@ -1,77 +1,111 @@ -# Introduction +BART [![Build Status](https://travis-ci.org/ARM-software/bart.svg?branch=master)](https://travis-ci.org/ARM-software/bart) +==== -The Behavioural Analysis and Regression Toolkit is based on [TRAPpy](https://github.com/ARM-software/trappy). 
The primary goal is to assert behaviours using the FTrace output from the kernel +The Behavioural Analysis and Regression Toolkit is based on +[TRAPpy](https://github.com/ARM-software/trappy). The primary goal is to assert +behaviours using the FTrace output from the kernel. ## Target Audience -The framework is designed to cater to a wide range of audience. Aiding developers as well as automating -the testing of "difficult to test" behaviours. -### Kernel Developers +The framework is designed to cater to a wide range of audience. Aiding +developers as well as automating the testing of "difficult to test" behaviours. + +#### Kernel Developers Making sure that the code that you are writing is doing the right thing. -### Performance Engineers +#### Performance Engineers + +Plotting/Asserting performance behaviours between different revisions of the +kernel. -Plotting/Asserting performance behaviours between different revisions of the kernel +#### Quality Assurance/Release Engineers -### Quality Assurance/Release Engineers -Verifying behaviours when different components/patches are integrated +Verifying behaviours when different components/patches are integrated. 
# Installation -Clone the [BART]( https://github.com/ARM-software/bart) and [TRAPpy]( https://github.com/ARM-software/trappy) repos +## Required dependencies - git clone git@github.com:ARM-software/bart.git - git clone git@github.com:ARM-software/trappy.git +#### Install additional tools required for some tests and functionalities -Add the directories to your PYTHONPATH + $ sudo apt install trace-cmd kernelshark + +#### Install the Python package manager + + $ sudo apt install python-pip python-dev + +#### Install required python packages + + $ sudo apt install libfreetype6-dev libpng12-dev python-nose + $ sudo pip install numpy matplotlib pandas ipython[all] + $ sudo pip install --upgrade trappy - export PYTHONPATH=$BASE_DIR/bart:$BASE_DIR/trappy:$PYTHONPATH +`ipython[all]` will install [IPython +Notebook](http://ipython.org/notebook.html), a web based interactive +python programming interface. It is required if you plan to use interactive +plotting in BART. -Install dependencies +#### Install BART + + $ sudo pip install --upgrade bart-py + +# For developers + +Instead of installing TRAPpy and BART using `pip` you should the repositories: + + $ git clone git@github.com:ARM-software/bart.git + $ git clone git@github.com:ARM-software/trappy.git + +Add the directories to your PYTHONPATH - apt-get install ipython-notebook python-pandas + $ export PYTHONPATH=$BASE_DIR/bart:$BASE_DIR/trappy:$PYTHONPATH -[IPython](http://ipython.org/notebook.html) notebook is a web based interactive python programming interface. -It is required if you plan to use interactive plotting in BART. # Trace Analysis Language -BART also provides a generic Trace Analysis Language, which allows the user to construct complex relation statements on trace data and assert their expected behaviours. 
The usage of the Analyzer module can be seen for the thermal behaviours [here](https://github.com/ARM-software/bart/blob/master/notebooks/thermal/Thermal.ipynb) +BART also provides a generic Trace Analysis Language, which allows the user to +construct complex relation statements on trace data and assert their expected +behaviours. The usage of the Analyzer module can be seen for the thermal +behaviours +[here](https://github.com/ARM-software/bart/blob/master/notebooks/thermal/Thermal.ipynb) # Scheduler Assertions Enables assertion and the calculation of the following parameters: -### Runtime +#### Runtime The total time that the task spent on a CPU executing. -### Switch +#### Switch -Assert that a task switched between CPUs/Clusters in a given window of time +Assert that a task switched between CPUs/Clusters in a given window of time. -### Duty Cycle +#### Duty Cycle The ratio of the execution time to the total time. -### Period +#### Period -The average difference between two switch-in or two switch-out events of a task +The average difference between two switch-in or two switch-out events of a +task. -### First CPU +#### First CPU The first CPU that a task ran on. -### Residency +#### Residency -Calculate and assert the total residency of a task on a CPU or cluster +Calculate and assert the total residency of a task on a CPU or cluster. -### Examples +#### Examples -The Scheduler assertions also use TRAPpy's EventPlot to provide a kernelshark like timeline -for the tasks under consideration. (in IPython notebooks). +The Scheduler assertions also use TRAPpy's EventPlot to provide a `kernelshark` +like timeline for the tasks under consideration. (in IPython notebooks). 
-A notebook explaining the usage of the framework for asserting the deadline scheduler behaviours can be seen [here](https://rawgit.com/sinkap/0abbcc4918eb228b8887/raw/a1b4d6e0079f4ea0368d595d335bc340616501ff/SchedDeadline.html) +A notebook explaining the usage of the framework for asserting the deadline +scheduler behaviours can be seen +[here](https://rawgit.com/sinkap/0abbcc4918eb228b8887/raw/a1b4d6e0079f4ea0368d595d335bc340616501ff/SchedDeadline.html). -- cgit v1.2.3 From 180184c86082da9617c319b20512b3345739725a Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 31 Mar 2016 12:38:07 +0100 Subject: README: add missing "clone" --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 13d16b2..384320e 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ plotting in BART. # For developers -Instead of installing TRAPpy and BART using `pip` you should the repositories: +Instead of installing TRAPpy and BART using `pip` you should clone the repositories: $ git clone git@github.com:ARM-software/bart.git $ git clone git@github.com:ARM-software/trappy.git -- cgit v1.2.3 From 6feeff0d3d0a2dced8af577443ed0789019f7392 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 31 Mar 2016 12:40:00 +0100 Subject: README: add a link to the API reference We don't advertise enough the documentation. Add a link in the README to raise awareness. --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 384320e..9079eaa 100644 --- a/README.md +++ b/README.md @@ -108,4 +108,6 @@ A notebook explaining the usage of the framework for asserting the deadline scheduler behaviours can be seen [here](https://rawgit.com/sinkap/0abbcc4918eb228b8887/raw/a1b4d6e0079f4ea0368d595d335bc340616501ff/SchedDeadline.html). 
+# API reference +The API reference can be found in https://pythonhosted.org/bart-py -- cgit v1.2.3 From cbb7b215921e4b23e9010c383f09b7319316217d Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 6 Apr 2016 11:39:44 +0100 Subject: README: explicitly state that the instructions are for Ubuntu 14.04 LTS We have always implicitly supported Ubuntu 14.04 LTS as the minimum version. State it explicitly in the README. --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 9079eaa..bead557 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,12 @@ Verifying behaviours when different components/patches are integrated. # Installation +The following instructions are for Ubuntu 14.04 LTS but they should +also work with Debian jessie. Older versions of Ubuntu or Debian +(e.g. Ubuntu 12.04 or Debian wheezy) will likely require to install +more packages from pip as the ones present in Ubuntu 12.04 or Debian +wheezy will probably be too old. + ## Required dependencies #### Install additional tools required for some tests and functionalities -- cgit v1.2.3 From 4f500910a7becdaf973f35bcc498db93e0a6b5e2 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 6 Apr 2016 12:20:48 +0100 Subject: README: add a pypi badge It's cool to point out which version is currently available in pypi. Report it next to the travis badge. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bead557..268d893 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -BART [![Build Status](https://travis-ci.org/ARM-software/bart.svg?branch=master)](https://travis-ci.org/ARM-software/bart) +BART [![Build Status](https://travis-ci.org/ARM-software/bart.svg?branch=master)](https://travis-ci.org/ARM-software/bart) [![Version](https://img.shields.io/pypi/v/bart-py.svg)](https://pypi.python.org/pypi/bart-py) ==== The Behavioural Analysis and Regression Toolkit is based on -- cgit v1.2.3 From adb8fc25ebaa2a3a46719ad5670613e75f67fa31 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 7 Jul 2016 18:15:10 +0100 Subject: bart: set the version using an explicit file There is no pythonic way of specifying the version of a project, [0] describes 7 (seven!) ways of doing it. We were currently using method 5, setting the value in setup.py and using pkg_resources to get it from the installed version. This works ok if you have installed the package using python setup.py or pip, but fails if you are importing bart from a checkout, which is what lisa do. Even worse, if you import it from lisa but have an old bart version installed, bart.__version__ will tell you the version of the installed bart, not the one you have imported and are using. Switch to use a version.py file that's distributed with the project (method 3), as trappy did in ARM-software/trappy@712e6f93b308 . 
Fixes #57 [0] https://packaging.python.org/en/latest/single_source_version/ --- bart/__init__.py | 7 +------ bart/version.py | 16 ++++++++++++++++ docs/api_reference/conf.py | 6 +++--- setup.py | 4 ++-- 4 files changed, 22 insertions(+), 11 deletions(-) create mode 100644 bart/version.py diff --git a/bart/__init__.py b/bart/__init__.py index c031ebb..886dbc8 100644 --- a/bart/__init__.py +++ b/bart/__init__.py @@ -18,9 +18,4 @@ import bart.sched import bart.common import bart.thermal -import pkg_resources - -try: - __version__ = pkg_resources.get_distribution("bart-py").version -except pkg_resources.DistributionNotFound: - __version__ = "local" +from bart.version import __version__ diff --git a/bart/version.py b/bart/version.py new file mode 100644 index 0000000..da8af07 --- /dev/null +++ b/bart/version.py @@ -0,0 +1,16 @@ +# Copyright 2016-2016 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +__version__ = "1.5.0" diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index 0d54455..32d6653 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -81,10 +81,10 @@ author = u'Kapileshwar Singh(KP), Javi Merino' # |version| and |release|, also used in various other places throughout the # built documents. # -# The short X.Y version. -version = '1.5' +# The short X.Y version. Drop everything after the last "." +version = bart.__version__[:bart.__version__.rindex(".")] # The full version, including alpha/beta/rc tags. 
-release = '1.5.0' +release = bart.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/setup.py b/setup.py index 69c3876..c73c225 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ from setuptools import setup, find_packages -VERSION = "1.5.0" +execfile("bart/version.py") LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general expectation of the state of the system while targeting a single or set of heuristics. @@ -34,7 +34,7 @@ REQUIRES = [ ] setup(name='bart-py', - version=VERSION, + version=__version__, license="Apache v2", author="ARM-BART", author_email="bart@arm.com", -- cgit v1.2.3 From e467a1eb564c59de66673a9a3b5795c4b16f4ce8 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 13 Jul 2016 17:05:21 +0100 Subject: sched: raise a ValueError in get_pids_for_process() if the trace doesn't contain sched_switch events --- bart/sched/functions.py | 3 +++ tests/test_sched_functions.py | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 tests/test_sched_functions.py diff --git a/bart/sched/functions.py b/bart/sched/functions.py index 546e8fb..8a3d7a0 100644 --- a/bart/sched/functions.py +++ b/bart/sched/functions.py @@ -436,6 +436,9 @@ def get_pids_for_process(ftrace, execname, cls=None): df = ftrace.sched_switch.data_frame except AttributeError: raise ValueError("SchedSwitch event not found in ftrace") + + if len(df) == 0: + raise ValueError("SchedSwitch event not found in ftrace") else: event = getattr(ftrace, cls.name) df = event.data_frame diff --git a/tests/test_sched_functions.py b/tests/test_sched_functions.py new file mode 100644 index 0000000..164df3c --- /dev/null +++ b/tests/test_sched_functions.py @@ -0,0 +1,39 @@ +# Copyright 2016-2016 ARM Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import trappy + +import utils_tests + +class TestSchedFunctions(utils_tests.SetupDirectory): + def __init__(self, *args, **kwargs): + super(TestSchedFunctions, self).__init__([], *args, **kwargs) + + def test_get_pids_for_processes_no_sched_switch(self): + """get_pids_for_processes() raises an exception if the trace doesn't have a sched_switch event""" + from bart.sched.functions import get_pids_for_process + + trace_file = "trace.txt" + raw_trace_file = "trace.raw.txt" + + with open(trace_file, "w") as fout: + fout.write("") + + with open(raw_trace_file, "w") as fout: + fout.write("") + + trace = trappy.FTrace(trace_file) + with self.assertRaises(ValueError): + get_pids_for_process(trace, "foo") -- cgit v1.2.3 From 32edbcf6867f9e211259178046fedd4a95cdbe1f Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Wed, 13 Jul 2016 17:19:57 +0100 Subject: sched: functions: cope with processes whose string is a substring of another process We can't use .startswith() to match processes since sometimes a process name is a substring of another process name. For example, in a trace with processes "wmig" (pid 3268) and "wmig1" (pid 3269), if we use .startswith("wmig"), the second process matches which is wrong. In this trace, the only pid for the wmig process is 3268. 
--- bart/sched/functions.py | 2 +- tests/test_sched_functions.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/bart/sched/functions.py b/bart/sched/functions.py index 8a3d7a0..cb3336e 100644 --- a/bart/sched/functions.py +++ b/bart/sched/functions.py @@ -443,7 +443,7 @@ def get_pids_for_process(ftrace, execname, cls=None): event = getattr(ftrace, cls.name) df = event.data_frame - mask = df["next_comm"].apply(lambda x : True if x.startswith(execname) else False) + mask = df["next_comm"].apply(lambda x : True if x == execname else False) return list(np.unique(df[mask]["next_pid"].values)) def get_task_name(ftrace, pid, cls=None): diff --git a/tests/test_sched_functions.py b/tests/test_sched_functions.py index 164df3c..1a8d4ac 100644 --- a/tests/test_sched_functions.py +++ b/tests/test_sched_functions.py @@ -37,3 +37,33 @@ class TestSchedFunctions(utils_tests.SetupDirectory): trace = trappy.FTrace(trace_file) with self.assertRaises(ValueError): get_pids_for_process(trace, "foo") + + def test_get_pids_for_process_funny_process_names(self): + """get_pids_for_process() works when a process name is a substring of another""" + from bart.sched.functions import get_pids_for_process + + trace_file = "trace.txt" + raw_trace_file = "trace.raw.txt" + in_data = """ -0 [001] 10826.894644: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=0 next_comm=rt-app next_pid=3268 next_prio=120 + wmig-3268 [001] 10826.894778: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=rt-app next_pid=3269 next_prio=120 + wmig1-3269 [001] 10826.905152: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=1 next_comm=wmig next_pid=3268 next_prio=120 + wmig-3268 [001] 10826.915384: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=swapper/1 next_pid=0 next_prio=120 + -0 [005] 10826.995169: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 
prev_state=0 next_comm=wmig1 next_pid=3269 next_prio=120 + wmig1-3269 [005] 10827.007064: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=0 next_comm=wmig next_pid=3268 next_prio=120 + wmig-3268 [005] 10827.019061: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=0 next_comm=wmig1 next_pid=3269 next_prio=120 + wmig1-3269 [005] 10827.031061: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=0 next_comm=wmig next_pid=3268 next_prio=120 + wmig-3268 [005] 10827.050645: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=swapper/5 next_pid=0 next_prio=120 +""" + + # We create an empty trace.txt to please trappy ... + with open(trace_file, "w") as fout: + fout.write("") + + # ... but we only put the sched_switch events in the raw trace + # file because that's where trappy is going to look for + with open(raw_trace_file, "w") as fout: + fout.write(in_data) + + trace = trappy.FTrace(trace_file) + + self.assertEquals(get_pids_for_process(trace, "wmig"), [3268]) -- cgit v1.2.3 From 46cbd4f6176ef351ba6d060cb94e8db8ac461c0e Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 1 Aug 2016 16:33:47 +0100 Subject: version: release bart 1.6.0 --- bart/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/version.py b/bart/version.py index da8af07..028405a 100644 --- a/bart/version.py +++ b/bart/version.py @@ -13,4 +13,4 @@ # limitations under the License. 
# -__version__ = "1.5.0" +__version__ = "1.6.0" -- cgit v1.2.3 From af6f4c13472388f5512fc6a79e7c26145510a726 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 11 Aug 2016 16:41:42 +0100 Subject: sched: SchedAssert: fix typo in module description --- bart/sched/SchedAssert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index b0884f3..d716c96 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -14,7 +14,7 @@ # """ -:mod:`bart.sched.SchedAssert` provides ability of assert scheduler behaviour. +:mod:`bart.sched.SchedAssert` provides ability to assert scheduler behaviour. The analysis is based on TRAPpy's statistics framework and is potent enough to aggregate statistics over processor hierarchies. """ -- cgit v1.2.3 From d77008c8d87eadb88b01ceaeda70650b7d364c8c Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 11 Aug 2016 17:59:06 +0100 Subject: sched: SchedAssert: fix type documentation for the level parameter The type of the level parameter for getResidency(), assertResidency() and assertSwitch() is not displayed in the HTML documentation. trappy.stats.Topology expects a string, so report that as the type. 
--- bart/sched/SchedAssert.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index d716c96..dd0b427 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -149,7 +149,7 @@ class SchedAssert(object): of the task :param level: The topological level to which the group belongs - :type level (hashable): + :type level: str :param node: The group of CPUs for which residency needs to calculated @@ -195,7 +195,7 @@ class SchedAssert(object): percent=False): """ :param level: The topological level to which the group belongs - :type level (hashable): + :type level: str :param node: The group of CPUs for which residency needs to calculated @@ -324,7 +324,7 @@ class SchedAssert(object): :code:`from_node` to the :code:`to_node`: :param level: The topological level to which the group belongs - :type level (hashable): + :type level: str :param from_node: The node from which the task switches out :type from_node: list -- cgit v1.2.3 From 956b609efec4c4823fa4a5c353eef0feba8a30c7 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 11 Aug 2016 17:54:23 +0100 Subject: sched: SchedMultiAssert: add getCPUBusyTime For multiple tasks, it's valuable to verify that they have ran on a set of cpus for a given time. With SchedMultiAssert.getCPUBusyTime() we can do that now! 
--- bart/sched/SchedMultiAssert.py | 38 ++++++++++++++++++++++++++++++++++++++ tests/test_sched_assert.py | 41 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py index bc37798..32ea17d 100755 --- a/bart/sched/SchedMultiAssert.py +++ b/bart/sched/SchedMultiAssert.py @@ -227,6 +227,44 @@ class SchedMultiAssert(object): else: return result + def getCPUBusyTime(self, level, node, window=None, percent=False): + """Get the amount of time the cpus in the system were busy executing the + tasks + + :param level: The topological level to which the group belongs + :type level: string + + :param node: The group of CPUs for which to calculate busy time + :type node: list + + :param window: A (start, end) tuple to limit the scope of the + calculation. + :type window: tuple + + :param percent: If True the result is normalized to the total + time of the period, either the window or the full lenght of + the trace. + :type percent: bool + + .. 
math:: + + R = \\frac{T_{busy} \\times 100}{T_{total}} + + """ + residencies = self.getResidency(level, node, window=window) + + busy_time = sum(v["residency"] for v in residencies.itervalues()) + + if percent: + if window: + total_time = window[1] - window[0] + else: + total_time = self._ftrace.get_duration() + num_cpus = len(node) + return busy_time / (total_time * num_cpus) * 100 + else: + return busy_time + def generate_events(self, level, window=None): """Generate Events for the trace plot diff --git a/tests/test_sched_assert.py b/tests/test_sched_assert.py index f746a77..029dde6 100644 --- a/tests/test_sched_assert.py +++ b/tests/test_sched_assert.py @@ -14,13 +14,14 @@ # +from bart.sched.SchedAssert import SchedAssert +from bart.sched.SchedMultiAssert import SchedMultiAssert import trappy from trappy.stats.Topology import Topology import unittest import utils_tests -from bart.sched.SchedAssert import SchedAssert @unittest.skipUnless(utils_tests.trace_cmd_installed(), "trace-cmd not installed") @@ -68,3 +69,41 @@ class TestSchedAssert(utils_tests.SetupDirectory): expected_time = 0.000817 self.assertAlmostEqual(s.getRuntime(window=window), expected_time, places=9) + +class TestSchedMultiAssert(utils_tests.SetupDirectory): + def __init__(self, *args, **kwargs): + self.big = [1,2] + self.little = [0, 3, 4, 5] + self.clusters = [self.big, self.little] + self.all_cpus = sorted(self.big + self.little) + self.topology = Topology(clusters=self.clusters) + super(TestSchedMultiAssert, self).__init__( + [("raw_trace.dat", "trace.dat")], + *args, + **kwargs) + + def test_cpu_busy_time(self): + """SchedMultiAssert.getCPUBusyTime() work""" + + # precalculated values against these processes in the trace + pids = [4729, 4734] + first_time = .000214 + last_time = .003171 + + tr = trappy.FTrace() + sma = SchedMultiAssert(tr, self.topology, pids=pids) + + expected_busy_time = 0.0041839999754810708 + busy_time = sma.getCPUBusyTime("all", self.all_cpus, window=(first_time, 
last_time)) + self.assertAlmostEqual(busy_time, expected_busy_time) + + # percent calculation + expected_busy_pct = 23.582459561949445 + busy_pct= sma.getCPUBusyTime("all", self.all_cpus, percent=True, + window=(first_time, last_time)) + self.assertAlmostEqual(busy_pct, expected_busy_pct) + + # percent without a window + expected_busy_pct = 23.018818156540004 + busy_pct= sma.getCPUBusyTime("cluster", self.little, percent=True) + self.assertAlmostEqual(busy_pct, expected_busy_pct) -- cgit v1.2.3 From 39380260c2050c6844822cff385c8bc5e3a898b6 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 18 Aug 2016 11:20:08 +0100 Subject: sched: SchedAssert: add getLastCpu() Similar to getFirstCpu(), it's sometimes useful to know which CPU run a given PID for the last time in the trace. --- bart/sched/SchedAssert.py | 13 +++++++++++++ bart/sched/functions.py | 21 +++++++++++++++++++++ tests/test_sched_assert.py | 7 +++++++ 3 files changed, 41 insertions(+) diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py index dd0b427..5ecfec9 100755 --- a/bart/sched/SchedAssert.py +++ b/bart/sched/SchedAssert.py @@ -612,6 +612,19 @@ class SchedAssert(object): cpus = Utils.listify(cpus) return first_cpu in cpus + def getLastCpu(self, window=None): + """Return the last CPU the task ran on""" + + agg = self._aggregator(sched_funcs.last_cpu) + result = agg.aggregate(level="cpu", window=window) + result = list(itertools.chain.from_iterable(result)) + + end_time = max(result) + if not end_time: + return -1 + + return result.index(end_time) + def generate_events(self, level, start_id=0, window=None): """Generate events for the trace plot diff --git a/bart/sched/functions.py b/bart/sched/functions.py index cb3336e..d1b17d4 100644 --- a/bart/sched/functions.py +++ b/bart/sched/functions.py @@ -216,6 +216,27 @@ def first_cpu(series, window=None): else: return [float("inf")] +def last_cpu(series, window=None): + """:func:`aggfunc` to calculate the time of + the last switch out event 
in the series + This is returned as a vector of unit length + so that it can be aggregated and reduced across + nodes to find the last cpu of a task + + :param series: Input Time Series data + :type series: :mod:`pandas.Series` + + :param window: A tuple indicating a time window + :type window: tuple + """ + series = select_window(series, window) + series = series[series == SCHED_SWITCH_OUT] + + if len(series): + return [series.index.values[-1]] + else: + return [0] + def select_window(series, window): """Helper Function to select a portion of pandas time series diff --git a/tests/test_sched_assert.py b/tests/test_sched_assert.py index 029dde6..4f8c28b 100644 --- a/tests/test_sched_assert.py +++ b/tests/test_sched_assert.py @@ -70,6 +70,13 @@ class TestSchedAssert(utils_tests.SetupDirectory): self.assertAlmostEqual(s.getRuntime(window=window), expected_time, places=9) + def test_get_last_cpu(self): + """SchedAssert.getLastCpu() gives you the last cpu in which a task ran""" + expected_last_cpu = 5 + + sa = SchedAssert("trace.dat", self.topology, execname="ls") + self.assertEqual(sa.getLastCpu(), expected_last_cpu) + class TestSchedMultiAssert(utils_tests.SetupDirectory): def __init__(self, *args, **kwargs): self.big = [1,2] -- cgit v1.2.3 From caa216a10e21cd9557cdedb91e5479e690cb64a4 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Mon, 5 Sep 2016 11:19:47 +0100 Subject: version: release bart 1.7.0 --- bart/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/version.py b/bart/version.py index 028405a..b0b2fb2 100644 --- a/bart/version.py +++ b/bart/version.py @@ -13,4 +13,4 @@ # limitations under the License. 
# -__version__ = "1.6.0" +__version__ = "1.7.0" -- cgit v1.2.3 From 207088a14315fce5042ddd791674754798c39f16 Mon Sep 17 00:00:00 2001 From: Michele Di Giorgio Date: Tue, 13 Sep 2016 15:00:59 +0100 Subject: README: fix link to Thermal notebook Signed-off-by: Michele Di Giorgio --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 268d893..51e7c77 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ BART also provides a generic Trace Analysis Language, which allows the user to construct complex relation statements on trace data and assert their expected behaviours. The usage of the Analyzer module can be seen for the thermal behaviours -[here](https://github.com/ARM-software/bart/blob/master/notebooks/thermal/Thermal.ipynb) +[here](https://github.com/ARM-software/bart/blob/master/docs/notebooks/thermal/Thermal.ipynb) # Scheduler Assertions -- cgit v1.2.3 From ae15480355aa4239a26c9c7c257c2d98eecf717e Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 19 Oct 2016 18:41:57 +0100 Subject: travis-ci: Install Cython before Pandas Pandas 0.19 requires Cython but it is not automatically installed by Pip. So do it manually. 
--- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index a5cb355..792b7bf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,6 +9,7 @@ before_install: - sudo dpkg -i trace-cmd_2.4.0-1_amd64.deb install: - pip install matplotlib + - pip install Cython --install-option="--no-cython-compile" - pip install pandas - pip install ipython[all] - pip install --upgrade trappy -- cgit v1.2.3 From acb1762272d338ebbe825059990adcbc0736c4b0 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Wed, 19 Oct 2016 18:33:48 +0100 Subject: common: Fix doc for area_under_curve `sign` param --- bart/common/Utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/common/Utils.py b/bart/common/Utils.py index 0240c23..034cf74 100644 --- a/bart/common/Utils.py +++ b/bart/common/Utils.py @@ -77,7 +77,7 @@ def area_under_curve(series, sign=None, method="trapz", step="post"): or negative regions. Can have two values - `"+"` - - `"-"` + - `"="` :type sign: str :param method: The method for area calculation. This can -- cgit v1.2.3 From 8e32e8eb399356b50b3dc34285843e96fa72496e Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Tue, 1 Nov 2016 22:27:42 -0600 Subject: version: release bart 1.8.0 --- bart/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bart/version.py b/bart/version.py index b0b2fb2..c071e8d 100644 --- a/bart/version.py +++ b/bart/version.py @@ -13,4 +13,4 @@ # limitations under the License. # -__version__ = "1.7.0" +__version__ = "1.8.0" -- cgit v1.2.3 -- cgit v1.2.3