aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJoel Fernandes <joelaf@google.com>2017-05-15 17:50:39 -0700
committerJoel Fernandes <joelaf@google.com>2017-05-15 17:50:39 -0700
commit232e1a3fdc810f60aedda0e58cd95e47296e0335 (patch)
tree2b15569a708e12b96f469c316143e55dbdbe9434
parent9f6e355e4671f4562bf387c16bf9ccb1eddb4437 (diff)
parent8e32e8eb399356b50b3dc34285843e96fa72496e (diff)
downloadbart-232e1a3fdc810f60aedda0e58cd95e47296e0335.tar.gz
Merge remote-tracking branch 'goog/mirror-upstream-master' into HEADandroid-wear-8.1.0_r1android-vts-8.1_r9android-vts-8.1_r8android-vts-8.1_r7android-vts-8.1_r6android-vts-8.1_r5android-vts-8.1_r4android-vts-8.1_r3android-vts-8.1_r14android-vts-8.1_r13android-vts-8.1_r12android-vts-8.1_r11android-vts-8.1_r10android-security-8.1.0_r93android-security-8.1.0_r92android-security-8.1.0_r91android-security-8.1.0_r90android-security-8.1.0_r89android-security-8.1.0_r88android-security-8.1.0_r87android-security-8.1.0_r86android-security-8.1.0_r85android-security-8.1.0_r84android-security-8.1.0_r83android-security-8.1.0_r82android-cts-8.1_r9android-cts-8.1_r8android-cts-8.1_r7android-cts-8.1_r6android-cts-8.1_r5android-cts-8.1_r4android-cts-8.1_r3android-cts-8.1_r25android-cts-8.1_r24android-cts-8.1_r23android-cts-8.1_r22android-cts-8.1_r21android-cts-8.1_r20android-cts-8.1_r2android-cts-8.1_r19android-cts-8.1_r18android-cts-8.1_r17android-cts-8.1_r16android-cts-8.1_r15android-cts-8.1_r14android-cts-8.1_r13android-cts-8.1_r12android-cts-8.1_r11android-cts-8.1_r10android-cts-8.1_r1android-8.1.0_r9android-8.1.0_r81android-8.1.0_r80android-8.1.0_r8android-8.1.0_r79android-8.1.0_r78android-8.1.0_r77android-8.1.0_r76android-8.1.0_r75android-8.1.0_r74android-8.1.0_r73android-8.1.0_r72android-8.1.0_r71android-8.1.0_r70android-8.1.0_r7android-8.1.0_r69android-8.1.0_r68android-8.1.0_r67android-8.1.0_r66android-8.1.0_r65android-8.1.0_r64android-8.1.0_r63android-8.1.0_r62android-8.1.0_r61android-8.1.0_r60android-8.1.0_r6android-8.1.0_r53android-8.1.0_r52android-8.1.0_r51android-8.1.0_r50android-8.1.0_r5android-8.1.0_r48android-8.1.0_r47android-8.1.0_r46android-8.1.0_r45android-8.1.0_r43android-8.1.0_r42android-8.1.0_r41android-8.1.0_r40android-8.1.0_r4android-8.1.0_r39android-8.1.0_r38android-8.1.0_r37android-8.1.0_r36android-8.1.0_r35android-8.1.0_r33android-8.1.0_r32android-8.1.0_r31android-8.1.0_r30android-8.1.0_r3android-8.1.0_r29android-8.1.0_r28android-8.1.0_r27android-8.1.0_r26a
ndroid-8.1.0_r25android-8.1.0_r23android-8.1.0_r22android-8.1.0_r21android-8.1.0_r20android-8.1.0_r2android-8.1.0_r19android-8.1.0_r18android-8.1.0_r17android-8.1.0_r16android-8.1.0_r15android-8.1.0_r14android-8.1.0_r13android-8.1.0_r12android-8.1.0_r11android-8.1.0_r10android-8.1.0_r1android-8.0.0_r34android-8.0.0_r33android-8.0.0_r27android-8.0.0_r26android-8.0.0_r25android-8.0.0_r24android-8.0.0_r23android-8.0.0_r22android-8.0.0_r21security-oc-mr1-releaseoreo-mr1-wear-releaseoreo-mr1-vts-releaseoreo-mr1-security-releaseoreo-mr1-s1-releaseoreo-mr1-releaseoreo-mr1-devoreo-mr1-cuttlefish-testingoreo-mr1-cts-releaseoreo-m8-releaseoreo-m7-releaseoreo-m6-s4-releaseoreo-m6-s3-releaseoreo-m6-s2-releaseoreo-m5-releaseoreo-m4-s9-releaseoreo-m4-s8-releaseoreo-m4-s7-releaseoreo-m4-s6-releaseoreo-m4-s5-releaseoreo-m4-s4-releaseoreo-m4-s3-releaseoreo-m4-s2-releaseoreo-m4-s12-releaseoreo-m4-s11-releaseoreo-m4-s10-releaseoreo-m4-s1-releaseoreo-m3-releaseoreo-m2-s5-releaseoreo-m2-s4-releaseoreo-m2-s3-releaseoreo-m2-s2-releaseoreo-m2-s1-releaseoreo-m2-releaseoreo-dr3-releaseoreo-dr2-releaseoreo-dr1-releaseoreo-dr1-dev
-rw-r--r--.gitignore6
-rw-r--r--.travis.yml26
-rw-r--r--LICENSE202
-rw-r--r--README.md119
-rw-r--r--bart/__init__.py21
-rw-r--r--bart/common/Analyzer.py82
-rw-r--r--bart/common/Utils.py287
-rw-r--r--bart/common/__init__.py20
-rw-r--r--bart/common/signal.py300
-rwxr-xr-xbart/sched/SchedAssert.py666
-rwxr-xr-xbart/sched/SchedMatrix.py292
-rwxr-xr-xbart/sched/SchedMultiAssert.py299
-rw-r--r--bart/sched/__init__.py21
-rw-r--r--bart/sched/functions.py621
-rw-r--r--bart/thermal/ThermalAssert.py135
-rw-r--r--bart/thermal/__init__.py19
-rw-r--r--bart/version.py16
-rw-r--r--docs/api_reference/.gitignore3
-rw-r--r--docs/api_reference/Makefile196
-rw-r--r--docs/api_reference/conf.py381
-rw-r--r--docs/api_reference/index.rst22
-rw-r--r--docs/examples/thermal.py87
-rw-r--r--docs/notebooks/sched/SchedDeadline.ipynb648
-rw-r--r--docs/notebooks/thermal/Thermal.ipynb393
-rw-r--r--setup.cfg2
-rw-r--r--setup.py58
-rw-r--r--tests/raw_trace.datbin0 -> 2437120 bytes
-rw-r--r--tests/test_common_utils.py130
-rw-r--r--tests/test_sched_assert.py116
-rw-r--r--tests/test_sched_functions.py69
-rw-r--r--tests/test_signal.py104
-rw-r--r--tests/trace.raw.txt7
-rw-r--r--tests/trace.txt7
-rw-r--r--tests/utils_tests.py66
34 files changed, 5421 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4ddc47c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+*.pyc
+.ipynb_checkpoints
+example_trace_dat*
+/dist/
+/build/
+/bart_py.egg-info/
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..792b7bf
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,26 @@
+language: python
+python:
+ - "2.7"
+before_install:
+ - sudo apt-get update -qq
+ - sudo apt-get install -qq libfreetype6-dev
+ - sudo apt-get install -qq libpng12-dev
+ - wget http://ftp.us.debian.org/debian/pool/main/t/trace-cmd/trace-cmd_2.4.0-1_amd64.deb
+ - sudo dpkg -i trace-cmd_2.4.0-1_amd64.deb
+install:
+ - pip install matplotlib
+ - pip install Cython --install-option="--no-cython-compile"
+ - pip install pandas
+ - pip install ipython[all]
+ - pip install --upgrade trappy
+script: nosetests
+virtualenv:
+ system_site_packages: true
+notifications:
+ email:
+ recipients:
+ - javi.merino@arm.com
+ on_success: never
+ on_failure: always
+cache:
+ - pip
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..51e7c77
--- /dev/null
+++ b/README.md
@@ -0,0 +1,119 @@
+BART [![Build Status](https://travis-ci.org/ARM-software/bart.svg?branch=master)](https://travis-ci.org/ARM-software/bart) [![Version](https://img.shields.io/pypi/v/bart-py.svg)](https://pypi.python.org/pypi/bart-py)
+====
+
+The Behavioural Analysis and Regression Toolkit is based on
+[TRAPpy](https://github.com/ARM-software/trappy). The primary goal is to assert
+behaviours using the FTrace output from the kernel.
+
+## Target Audience
+
+The framework is designed to cater to a wide range of audience. Aiding
+developers as well as automating the testing of "difficult to test" behaviours.
+
+#### Kernel Developers
+
+Making sure that the code that you are writing is doing the right thing.
+
+#### Performance Engineers
+
+Plotting/Asserting performance behaviours between different revisions of the
+kernel.
+
+#### Quality Assurance/Release Engineers
+
+Verifying behaviours when different components/patches are integrated.
+
+# Installation
+
+The following instructions are for Ubuntu 14.04 LTS but they should
+also work with Debian jessie. Older versions of Ubuntu or Debian
+(e.g. Ubuntu 12.04 or Debian wheezy) will likely require to install
+more packages from pip as the ones present in Ubuntu 12.04 or Debian
+wheezy will probably be too old.
+
+## Required dependencies
+
+#### Install additional tools required for some tests and functionalities
+
+ $ sudo apt install trace-cmd kernelshark
+
+#### Install the Python package manager
+
+ $ sudo apt install python-pip python-dev
+
+#### Install required python packages
+
+ $ sudo apt install libfreetype6-dev libpng12-dev python-nose
+ $ sudo pip install numpy matplotlib pandas ipython[all]
+ $ sudo pip install --upgrade trappy
+
+`ipython[all]` will install [IPython
+Notebook](http://ipython.org/notebook.html), a web based interactive
+python programming interface. It is required if you plan to use interactive
+plotting in BART.
+
+#### Install BART
+
+ $ sudo pip install --upgrade bart-py
+
+# For developers
+
+Instead of installing TRAPpy and BART using `pip` you should clone the repositories:
+
+ $ git clone git@github.com:ARM-software/bart.git
+ $ git clone git@github.com:ARM-software/trappy.git
+
+Add the directories to your PYTHONPATH
+
+ $ export PYTHONPATH=$BASE_DIR/bart:$BASE_DIR/trappy:$PYTHONPATH
+
+
+# Trace Analysis Language
+
+BART also provides a generic Trace Analysis Language, which allows the user to
+construct complex relation statements on trace data and assert their expected
+behaviours. The usage of the Analyzer module can be seen for the thermal
+behaviours
+[here](https://github.com/ARM-software/bart/blob/master/docs/notebooks/thermal/Thermal.ipynb)
+
+# Scheduler Assertions
+
+Enables assertion and the calculation of the following parameters:
+
+#### Runtime
+
+The total time that the task spent on a CPU executing.
+
+#### Switch
+
+Assert that a task switched between CPUs/Clusters in a given window of time.
+
+#### Duty Cycle
+
+The ratio of the execution time to the total time.
+
+#### Period
+
+The average difference between two switch-in or two switch-out events of a
+task.
+
+#### First CPU
+
+The first CPU that a task ran on.
+
+#### Residency
+
+Calculate and assert the total residency of a task on a CPU or cluster.
+
+#### Examples
+
+The Scheduler assertions also use TRAPpy's EventPlot to provide a `kernelshark`
+like timeline for the tasks under consideration. (in IPython notebooks).
+
+A notebook explaining the usage of the framework for asserting the deadline
+scheduler behaviours can be seen
+[here](https://rawgit.com/sinkap/0abbcc4918eb228b8887/raw/a1b4d6e0079f4ea0368d595d335bc340616501ff/SchedDeadline.html).
+
+# API reference
+
+The API reference can be found in https://pythonhosted.org/bart-py
diff --git a/bart/__init__.py b/bart/__init__.py
new file mode 100644
index 0000000..886dbc8
--- /dev/null
+++ b/bart/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Initialization for bart"""
+
+import bart.sched
+import bart.common
+import bart.thermal
+from bart.version import __version__
diff --git a/bart/common/Analyzer.py b/bart/common/Analyzer.py
new file mode 100644
index 0000000..7bb55c9
--- /dev/null
+++ b/bart/common/Analyzer.py
@@ -0,0 +1,82 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Allow the user to assert various conditions
+based on the grammar defined in trappy.stats.grammar. The class is
+also intended to have aggregator based functionality. This is not
+implemented yet.
+"""
+
+from trappy.stats.grammar import Parser
+import warnings
+import numpy as np
+import pandas as pd
+
+# pylint: disable=invalid-name
+
+
+class Analyzer(object):
+
+ """
+ :param data: TRAPpy FTrace Object
+ :type data: :mod:`trappy.ftrace.FTrace`
+
+ :param config: A dictionary of variables, classes
+ and functions that can be used in the statements
+ :type config: dict
+ """
+
+ def __init__(self, data, config, **kwargs):
+ self._parser = Parser(data, config, **kwargs)
+
+ def assertStatement(self, statement, select=None):
+ """Solve the statement for a boolean result
+
+ :param statement: A string representing a valid
+ :mod:`trappy.stats.grammar` statement
+ :type statement: str
+
+ :param select: If the result represents a boolean
+ mask and the data was derived from a TRAPpy event
+ with a pivot value. The :code:`select` can be
+ used to select a particular pivot value
+ :type select: :mod:`pandas.DataFrame` column
+ """
+
+ result = self.getStatement(statement, select=select)
+
+ if isinstance(result, pd.DataFrame):
+ result = result.all().all()
+ elif not(isinstance(result, bool) or isinstance(result, np.bool_)): # pylint: disable=no-member
+ warnings.warn("solution of {} is not boolean".format(statement))
+
+ return result
+
+ def getStatement(self, statement, reference=False, select=None):
+ """Evaluate the statement"""
+
+ result = self._parser.solve(statement)
+
+ # pylint: disable=no-member
+ if np.isscalar(result):
+ return result
+ # pylint: enable=no-member
+
+ if select is not None and len(result):
+ result = result[select]
+ if reference:
+ result = self._parser.ref(result)
+
+ return result
diff --git a/bart/common/Utils.py b/bart/common/Utils.py
new file mode 100644
index 0000000..034cf74
--- /dev/null
+++ b/bart/common/Utils.py
@@ -0,0 +1,287 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Utility functions for sheye"""
+
+import trappy
+import numpy as np
+
+# pylint fails to recognize numpy members.
+# pylint: disable=no-member
+
+def listify(to_select):
+ """Utitlity function to handle both single and
+ list inputs
+ """
+
+ if not isinstance(to_select, list):
+ to_select = [to_select]
+
+ return to_select
+
+def init_ftrace(trace):
+ """Initialize the FTrace Object
+
+ :param trace: Path for the trace file
+ or a trace object
+ :type trace: str, :mod:`trappy.ftrace.FTrace`
+ """
+
+ if isinstance(trace, basestring):
+ return trappy.FTrace(trace)
+
+ elif isinstance(trace, trappy.BareTrace):
+ return trace
+
+ raise ValueError("Invalid trace Object")
+
+def select_window(series, window):
+ """Helper Function to select a portion of
+ pandas time series
+
+ :param series: Input Time Series data
+ :type series: :mod:`pandas.Series`
+
+ :param window: A tuple indicating a time window
+ :type window: tuple
+ """
+
+ if not window:
+ return series
+
+ start, stop = window
+ ix = series.index
+ selector = ((ix >= start) & (ix <= stop))
+ window_series = series[selector]
+ return window_series
+
+def area_under_curve(series, sign=None, method="trapz", step="post"):
+ """Return the area under the time series curve (Integral)
+
+ :param series: The time series to be integrated
+ :type series: :mod:`pandas.Series`
+
+ :param sign: Clip the data for the area in positive
+ or negative regions. Can have two values
+
+ - `"+"`
+ - `"="`
+ :type sign: str
+
+ :param method: The method for area calculation. This can
+ be any of the integration methods supported in `numpy`
+ or `rect`
+    :type method: str
+
+ :param step: The step behaviour for `rect` method
+ :type step: str
+
+ *Rectangular Method*
+
+ - Step: Post
+
+ Consider the following time series data
+
+ .. code::
+
+ 2 *----*----*----+
+ | |
+ 1 | *----*----+
+ |
+ 0 *----*----+
+ 0 1 2 3 4 5 6 7
+
+ .. code::
+
+ import pandas as pd
+ a = [0, 0, 2, 2, 2, 1, 1]
+ s = pd.Series(a)
+
+ The area under the curve is:
+
+ .. math::
+
+ \sum_{k=0}^{N-1} (x_{k+1} - {x_k}) \\times f(x_k) \\\\
+ (2 \\times 3) + (1 \\times 2) = 8
+
+ - Step: Pre
+
+ .. code::
+
+ 2 +----*----*----*
+ | |
+ 1 | +----*----*----+
+ |
+ 0 *----*
+ 0 1 2 3 4 5 6 7
+
+ .. code::
+
+ import pandas as pd
+ a = [0, 0, 2, 2, 2, 1, 1]
+ s = pd.Series(a)
+
+ The area under the curve is:
+
+ .. math::
+
+ \sum_{k=1}^{N} (x_k - x_{k-1}) \\times f(x_k) \\\\
+ (2 \\times 3) + (1 \\times 3) = 9
+ """
+
+ if sign == "+":
+ series = series.clip_lower(0)
+ elif sign == "=":
+ series = series.clip_upper(0)
+
+ series = series.dropna()
+
+ if method == "rect":
+
+ if step == "post":
+ values = series.values[:-1]
+ elif step == "pre":
+ values = series.values[1:]
+ else:
+ raise ValueError("Invalid Value for step: {}".format(step))
+
+ return float((values * np.diff(series.index)).sum())
+
+ if hasattr(np, method):
+ np_integ_method = getattr(np, method)
+ return np_integ_method(series.values, series.index)
+ else:
+ raise ValueError("Invalid method: {}".format(method))
+
+def interval_sum(series, value=None, step="post"):
+ """A function that returns the sum of the
+ intervals where the value of series is equal to
+ the expected value. Consider the following time
+ series data:
+
+ ====== =======
+ Time Value
+ ====== =======
+ 0 0
+ 1 0
+ 2 1
+ 3 1
+ 4 1
+ 5 1
+ 8 0
+ 9 1
+ 10 0
+ 11 1
+ 12 1
+ ====== =======
+
+ .. note::
+
+ The time/index values, in general, may not be
+        uniform. This causes a difference in
+        the values of :func:`interval_sum` for **step-pre**
+ and **step-post** behaviours
+
+ .. code::
+
+ import pandas
+
+ values = [0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1]
+ index = [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12]
+ series = pandas.Series(values, index=index)
+
+ The :func:`interval_sum` for the value 1 is calculated differently
+ for **step-post** and **step-pre** behaviours as follows:
+
+ - **Step-Post**
+
+
+ .. code::
+
+ 1 *----*----*----*-------------+ *----+ *----*
+ | | | | |
+ 0 *----*----+ *----+ *----+
+ 0 1 2 3 4 5 6 7 8 9 10 11 12
+
+ .. math::
+
+ (8-2) + (10-9) + (12-11) = 6 + 1 + 1 = 8
+
+ - **Step-Pre**
+
+ .. code::
+
+ 1 +----*----*----*----* +----* +----*----*
+ | | | | |
+ 0 *----* +--------------* +----*
+ 0 1 2 3 4 5 6 7 8 9 10 11 12
+
+ .. math::
+
+ (5-1) + (9-8) + (12-10) = 4 + 1 + 2 = 7
+
+ .. note::
+
+ The asterisks (*) on the plots above represent the values of the time
+ series data and these do not vary between the two step styles
+
+ :param series: The time series data
+ :type series: :mod:`pandas.Series`
+
+    :param value: The value to be checked for in the series. If the
+ value is None, the truth value of the elements in the
+ series will be used
+ :type value: element
+
+ :param step: The step behaviour as described above
+ ::
+
+ step="post"
+ step="pre
+ :type step: str
+ """
+
+ index = series.index
+ array = series.values
+
+ time_splits = np.append(np.where(np.diff(array) != 0), len(array) - 1)
+
+ prev = 0
+ time = 0
+ step_post = True
+
+ if step == "pre":
+ step_post = False
+ elif step != "post":
+ raise ValueError("Invalid value for step: {}".format(step))
+
+ for split in time_splits:
+
+ first_val = series.iloc[split]
+ check = (first_val == value) if value else first_val
+ if check:
+ start = prev
+ end = split
+
+ if step_post:
+ end = split + 1 if split < len(series) - 1 else split
+ else:
+ start = prev - 1 if prev > 1 else prev
+
+ time += index[end] - index[start]
+
+ prev = split + 1
+
+ return float(time)
diff --git a/bart/common/__init__.py b/bart/common/__init__.py
new file mode 100644
index 0000000..f42522b
--- /dev/null
+++ b/bart/common/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Initialization for bart.common"""
+
+
+from bart.common import Utils
+from bart.common import Analyzer
diff --git a/bart/common/signal.py b/bart/common/signal.py
new file mode 100644
index 0000000..acf7091
--- /dev/null
+++ b/bart/common/signal.py
@@ -0,0 +1,300 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+**Signals**
+
+ - Definition
+
+ A signal is a string representation of a TRAPpy event and the
+ column in the same event. The signal can be of two types:
+
+ - *Pivoted Signal*
+
+ A pivoted signal has a pivot specified in its event class.
+ This means that the signal in the event is a concatenation of different
+ signals which belong to different **pivot** nodes. The analysis for pivoted
+ signals must be done by decomposing them into pivoted signals for each node.
+
+ For example, an event that represents the load of the CPU can be pivoted on
+ :code:`"cpu"` which should be a column in the event's `DataFrame`
+
+ - *Non-Pivoted Signal*
+
+ A non pivoted signal has an event that has no pivot value associated with it.
+ This probably means that signal has one component and can be analysed without
+ decomposing it into smaller signals.
+
+ - Representation
+
+ The following are valid representations of a signal
+
+ - :code:`"event_name:event_column"`
+ - :code:`"trappy.event.class:event_column"`
+
+"""
+
+from trappy.stats.grammar import Parser
+from trappy.stats import StatConf
+from bart.common.Utils import area_under_curve, interval_sum
+
+# pylint: disable=invalid-name
+# pylint: disable=anomalous-backslash-in-string
+
+class SignalCompare(object):
+
+ """
+ :param data: TRAPpy FTrace Object
+ :type data: :mod:`trappy.ftrace.FTrace`
+
+ :param sig_a: The first signal
+ :type sig_a: str
+
+ :param sig_b: The second signal
+ :type sig_b: str
+
+ :param config: A dictionary of variables, classes
+ and functions that can be used in the statements
+ :type config: dict
+
+ :param method: The method to be used for reindexing data
+ This can be one of the standard :mod:`pandas.DataFrame`
+ methods (eg. pad, bfill, nearest). The default is pad
+ or use the last valid observation.
+ :type method: str
+
+ :param limit: The number of indices a value will be propagated
+ when reindexing. The default is None
+ :type limit: int
+
+ :param fill: Whether to fill the NaNs in the data.
+ The default value is True.
+ :type fill: bool
+
+ .. note::
+
+ Both the signals must have the same pivots. For example:
+
+ - Signal A has a pivot as :code:`"cpu"` which means that
+ the trappy event (:mod:`trappy.base.Base`) has a pivot
+ parameter which is equal to :code:`"cpu"`. Then the signal B
+ should also have :code:`"cpu"` as its pivot.
+
+ - Signal A and B can both have undefined or None
+ as their pivots
+ """
+
+ def __init__(self, data, sig_a, sig_b, **kwargs):
+
+ self._parser = Parser(
+ data,
+ config=kwargs.pop(
+ "config",
+ None),
+ **kwargs)
+ self._a = sig_a
+ self._b = sig_b
+ self._pivot_vals, self._pivot = self._get_signal_pivots()
+
+ # Concatenate the indices by doing any operation (say add)
+ self._a_data = self._parser.solve(sig_a)
+ self._b_data = self._parser.solve(sig_b)
+
+ def _get_signal_pivots(self):
+ """Internal function to check pivot conditions and
+ return an intersection of pivot on the signals"""
+
+ sig_a_info = self._parser.inspect(self._a)
+ sig_b_info = self._parser.inspect(self._b)
+
+ if sig_a_info["pivot"] != sig_b_info["pivot"]:
+ raise RuntimeError("The pivot column for both signals" +
+ "should be same (%s,%s)"
+ % (sig_a_info["pivot"], sig_b_info["pivot"]))
+
+ if sig_a_info["pivot"]:
+ pivot_vals = set(
+ sig_a_info["pivot_values"]).intersection(sig_b_info["pivot_values"])
+ pivoted = sig_a_info["pivot"]
+ else:
+ pivot_vals = [StatConf.GRAMMAR_DEFAULT_PIVOT]
+ pivoted = False
+
+ return pivot_vals, pivoted
+
    def conditional_compare(self, condition, **kwargs):
        """Conditionally compare two signals

        The conditional comparison of signals has two components:

        - **Value Coefficient** :math:`\\alpha_{v}` which measures the difference in values of
          of the two signals when the condition is true:

          .. math::

                \\alpha_{v} = \\frac{area\_under\_curve(S_A\ |\ C(t)\ is\ true)}
                {area\_under\_curve(S_B\ |\ C(t)\ is\ true)} \\\\

                \\alpha_{v} = \\frac{\int S_A(\{t\ |\ C(t)\})dt}{\int S_B(\{t\ |\ C(t)\})dt}

        - **Time Coefficient** :math:`\\alpha_{t}` which measures the time during which the
          condition holds true.

          .. math::

                \\alpha_{t} = \\frac{T_{valid}}{T_{total}}

        :param condition: A condition that returns a truth value and obeys the grammar syntax
            ::

                "event_x:sig_a > event_x:sig_b"

        :type condition: str

        :param method: The method for area calculation. This can
            be any of the integration methods supported in `numpy`
            or `rect`
        :type method: str

        :param step: The step behaviour for area and time
            summation calculation
        :type step: str

        Consider the two signals A and B as follows:

            .. code::

                A = [0, 0, 0, 3, 3, 0, 0, 0]
                B = [0, 0, 2, 2, 2, 2, 1, 1]


            .. code::


                A = xxxx
                3                 *xxxx*xxxx+        B = ----
                                  |         |
                2           *----*----*----+
                            |     |         |
                1           |     |         *----*----+
                            |     |         |
                0 *x-x-*x-x-+xxxx+         +xxxx*xxxx+
                  0    1    2    3    4    5    6    7

        The condition:

        .. math::

            A > B

        is valid between T=3 and T=5. Therefore,

        .. math::

            \\alpha_v=1.5 \\\\
            \\alpha_t=\\frac{2}{7}

        :returns: There are two cases:

            - **Pivoted Signals**
              ::

                    {
                        "pivot_name" : {
                                "pval_1" : (v1,t1),
                                "pval_2" : (v2, t2)
                        }
                    }
            - **Non Pivoted Signals**

              The tuple of :math:`(\\alpha_v, \\alpha_t)`
        """

        if self._pivot:
            result = {self._pivot: {}}

        # Truth series (per pivot value) for the condition
        mask = self._parser.solve(condition)
        step = kwargs.get("step", "post")

        for pivot_val in self._pivot_vals:

            a_piv = self._a_data[pivot_val]
            b_piv = self._b_data[pivot_val]

            # Value coefficient: ratio of the areas under the two
            # signals restricted to the samples where the condition holds
            area = area_under_curve(a_piv[mask[pivot_val]], **kwargs)
            try:
                area /= area_under_curve(b_piv[mask[pivot_val]], **kwargs)
            except ZeroDivisionError:
                area = float("nan")

            # Time coefficient: time the condition holds, normalized by
            # the overlap of the two signals' valid index ranges
            duration = min(a_piv.last_valid_index(), b_piv.last_valid_index())
            duration -= max(a_piv.first_valid_index(),
                            b_piv.first_valid_index())
            duration = interval_sum(mask[pivot_val], step=step) / duration

            if self._pivot:
                result[self._pivot][pivot_val] = area, duration
            else:
                result = area, duration

        return result
+
+ def get_overshoot(self, **kwargs):
+ """Special case for :func:`conditional_compare`
+ where the condition is:
+ ::
+
+ "sig_a > sig_b"
+
+ :param method: The method for area calculation. This can
+ be any of the integration methods supported in `numpy`
+ or `rect`
+ :type param: str
+
+ :param step: The step behaviour for calculation of area
+ and time summation
+ :type step: str
+
+ .. seealso::
+
+ :func:`conditional_compare`
+ """
+
+ condition = " ".join([self._a, ">", self._b])
+ return self.conditional_compare(condition, **kwargs)
+
+ def get_undershoot(self, **kwargs):
+ """Special case for :func:`conditional_compare`
+ where the condition is:
+ ::
+
+ "sig_a < sig_b"
+
+ :param method: The method for area calculation. This can
+ be any of the integration methods supported in `numpy`
+ or `rect`
+ :type param: str
+
+ :param step: The step behaviour for calculation of area
+ and time summation
+ :type step: str
+
+ .. seealso::
+
+ :func:`conditional_compare`
+ """
+
+ condition = " ".join([self._a, "<", self._b])
+ return self.conditional_compare(condition, **kwargs)
diff --git a/bart/sched/SchedAssert.py b/bart/sched/SchedAssert.py
new file mode 100755
index 0000000..5ecfec9
--- /dev/null
+++ b/bart/sched/SchedAssert.py
@@ -0,0 +1,666 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+:mod:`bart.sched.SchedAssert` provides ability to assert scheduler behaviour.
+The analysis is based on TRAPpy's statistics framework and is potent enough
+to aggregate statistics over processor hierarchies.
+"""
+
+import trappy
+import itertools
+import math
+from trappy.stats.Aggregator import MultiTriggerAggregator
+from bart.sched import functions as sched_funcs
+from bart.common import Utils
+import numpy as np
+
+# pylint: disable=invalid-name
+# pylint: disable=too-many-arguments
+class SchedAssert(object):
+
+ """The primary focus of this class is to assert and verify
+ predefined scheduler scenarios. This does not compare parameters
+ across runs
+
+ :param ftrace: A single trappy.FTrace object
+ or a path that can be passed to trappy.FTrace
+ :type ftrace: :mod:`trappy.ftrace.FTrace`
+
+ :param topology: A topology that describes the arrangement of
+ CPU's on a system. This is useful for multi-cluster systems
+ where data needs to be aggregated at different topological
+ levels
+ :type topology: :mod:`trappy.stats.Topology.Topology`
+
+ :param execname: The execname of the task to be analysed
+
+ .. note::
+
+ There should be only one PID that maps to the specified
+ execname. If there are multiple PIDs :mod:`bart.sched.SchedMultiAssert`
+ should be used
+
+ :type execname: str
+
+ :param pid: The process ID of the task to be analysed
+ :type pid: int
+
+ .. note::
+
+ One of pid or execname is mandatory. If only execname
+ is specified, The current implementation will fail if
+ there are more than one processes with the same execname
+ """
+
+ def __init__(self, ftrace, topology, execname=None, pid=None):
+
+ ftrace = Utils.init_ftrace(ftrace)
+
+ if not execname and not pid:
+ raise ValueError("Need to specify at least one of pid or execname")
+
+ self.execname = execname
+ self._ftrace = ftrace
+ self._pid = self._validate_pid(pid)
+ self._aggs = {}
+ self._topology = topology
+ self._triggers = sched_funcs.sched_triggers(self._ftrace, self._pid,
+ trappy.sched.SchedSwitch)
+ self.name = "{}-{}".format(self.execname, self._pid)
+
    def _validate_pid(self, pid):
        """Validate the passed pid argument

        :param pid: The PID supplied by the caller, or a falsy value to
            look the PID up from ``self.execname``
        :type pid: int

        :return: The validated PID

        :raises RuntimeError: if ``self.execname`` does not map to
            exactly one PID (when no pid is given), or if the supplied
            pid is not among the PIDs matching ``self.execname``
        """

        if not pid:
            pids = sched_funcs.get_pids_for_process(self._ftrace,
                                              self.execname)

            if len(pids) != 1:
                raise RuntimeError(
                    "There should be exactly one PID {0} for {1}".format(
                        pids,
                        self.execname))

            return pids[0]

        elif self.execname:

            pids = sched_funcs.get_pids_for_process(self._ftrace,
                                              self.execname)
            if pid not in pids:
                raise RuntimeError(
                    "PID {0} not mapped to {1}".format(
                        pid,
                        self.execname))
        else:
            # Only a PID was given: derive the execname from the trace
            self.execname = sched_funcs.get_task_name(self._ftrace, pid)

        return pid
+
+ def _aggregator(self, aggfunc):
+ """
+ Return an aggregator corresponding to the
+ aggfunc, the aggregators are memoized for performance
+
+ :param aggfunc: Function parameter that
+ accepts a :mod:`pandas.Series` object and
+ returns a vector/scalar
+
+ :type: function(:mod:`pandas.Series`)
+ """
+
+ if aggfunc not in self._aggs.keys():
+ self._aggs[aggfunc] = MultiTriggerAggregator(self._triggers,
+ self._topology,
+ aggfunc)
+ return self._aggs[aggfunc]
+
+ def getResidency(self, level, node, window=None, percent=False):
+ """
+ Residency of the task is the amount of time it spends executing
+ a particular group of a topological level. For example:
+ ::
+
+ from trappy.stats.Topology import Topology
+
+ big = [1, 2]
+ little = [0, 3, 4, 5]
+
+ topology = Topology(clusters=[little, big])
+
+ s = SchedAssert(trace, topology, pid=123)
+ s.getResidency("cluster", big)
+
+ This will return the residency of the task on the big cluster. If
+ percent is specified it will be normalized to the total runtime
+ of the task
+
+ :param level: The topological level to which the group belongs
+ :type level: str
+
+ :param node: The group of CPUs for which residency
+ needs to calculated
+ :type node: list
+
+ :param window: A (start, end) tuple to limit the scope of the
+ residency calculation.
+ :type window: tuple
+
+ :param percent: If true the result is normalized to the total runtime
+ of the task and returned as a percentage
+ :type percent: bool
+
+ .. math::
+
+ R = \\frac{T_{group} \\times 100}{T_{total}}
+
+ .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertResidency`
+ """
+
+ # Get the index of the node in the level
+ node_index = self._topology.get_index(level, node)
+
+ agg = self._aggregator(sched_funcs.residency_sum)
+ level_result = agg.aggregate(level=level, window=window)
+
+ node_value = level_result[node_index]
+
+ if percent:
+ total = agg.aggregate(level="all", window=window)[0]
+ node_value = node_value * 100
+ node_value = node_value / total
+
+ return node_value
+
+ def assertResidency(
+ self,
+ level,
+ node,
+ expected_value,
+ operator,
+ window=None,
+ percent=False):
+ """
+ :param level: The topological level to which the group belongs
+ :type level: str
+
+ :param node: The group of CPUs for which residency
+ needs to calculated
+ :type node: list
+
+ :param expected_value: The expected value of the residency
+ :type expected_value: double
+
+ :param operator: A binary operator function that returns
+ a boolean. For example:
+ ::
+
+ import operator
+ op = operator.ge
+ assertResidency(level, node, expected_value, op)
+
+ Will do the following check:
+ ::
+
+ getResidency(level, node) >= expected_value
+
+ A custom function can also be passed:
+ ::
+
+ THRESHOLD=5
+ def between_threshold(a, expected):
+ return abs(a - expected) <= THRESHOLD
+
+ :type operator: function
+
+ :param window: A (start, end) tuple to limit the scope of the
+ residency calculation.
+ :type window: tuple
+
+ :param percent: If true the result is normalized to the total runtime
+ of the task and returned as a percentage
+ :type percent: bool
+
+ .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getResidency`
+ """
+ node_value = self.getResidency(level, node, window, percent)
+ return operator(node_value, expected_value)
+
+ def getStartTime(self):
+ """
+ :return: The first time the task ran across all the CPUs
+ """
+
+ agg = self._aggregator(sched_funcs.first_time)
+ result = agg.aggregate(level="all", value=sched_funcs.TASK_RUNNING)
+ return min(result[0])
+
+ def getEndTime(self):
+ """
+ :return: The first last time the task ran across
+ all the CPUs
+ """
+
+ agg = self._aggregator(sched_funcs.first_time)
+ agg = self._aggregator(sched_funcs.last_time)
+ result = agg.aggregate(level="all", value=sched_funcs.TASK_RUNNING)
+ return max(result[0])
+
    def _relax_switch_window(self, series, direction, window):
        """Find a running time-stamp at (or relaxed beyond) a window edge.

        direction == "left"
            return the last time the task was running
            in the window; if no such time exists in the window,
            extend the window's left extent to
            getStartTime()

        direction == "right"
            return the first time the task was running
            in the window. If no such time exists in the
            window, extend the window's right extent to
            getEndTime()

        The function returns a None if
        len(series[series == TASK_RUNNING]) == 0
        even in the extended window
        """

        # Restrict to samples where the task is actually running
        series = series[series == sched_funcs.TASK_RUNNING]
        w_series = sched_funcs.select_window(series, window)
        start, stop = window

        if direction == "left":
            if len(w_series):
                return w_series.index.values[-1]
            else:
                # Nothing in the window: extend left to the task's start
                start_time = self.getStartTime()
                w_series = sched_funcs.select_window(
                    series,
                    window=(
                        start_time,
                        start))

                if not len(w_series):
                    return None
                else:
                    return w_series.index.values[-1]

        elif direction == "right":
            if len(w_series):
                return w_series.index.values[0]
            else:
                # Nothing in the window: extend right to the task's end
                end_time = self.getEndTime()
                w_series = sched_funcs.select_window(series, window=(stop, end_time))

                if not len(w_series):
                    return None
                else:
                    return w_series.index.values[0]
        else:
            raise ValueError("direction should be either left or right")
+
    def assertSwitch(
            self,
            level,
            from_node,
            to_node,
            window,
            ignore_multiple=True):
        """
        This function asserts that there is context switch from the
        :code:`from_node` to the :code:`to_node`:

        :param level: The topological level to which the group belongs
        :type level: str

        :param from_node: The node from which the task switches out
        :type from_node: list

        :param to_node: The node to which the task switches
        :type to_node: list

        :param window: A (start, end) tuple to limit the scope of the
            residency calculation.
        :type window: tuple

        :param ignore_multiple: If true, the function will ignore multiple
           switches in the window, If false the assert will be true if and
           only if there is a single switch within the specified window
        :type ignore_multiple: bool
        """

        from_node_index = self._topology.get_index(level, from_node)
        to_node_index = self._topology.get_index(level, to_node)

        agg = self._aggregator(sched_funcs.csum)
        level_result = agg.aggregate(level=level)

        from_node_result = level_result[from_node_index]
        to_node_result = level_result[to_node_index]

        # Relax the window edges to find the switch-out and switch-in
        # time-stamps nearest the requested window
        from_time = self._relax_switch_window(from_node_result, "left", window)
        if ignore_multiple:
            to_time = self._relax_switch_window(to_node_result, "left", window)
        else:
            to_time = self._relax_switch_window(
                to_node_result,
                "right", window)

        # NOTE(review): a time-stamp of exactly 0 is treated as "not
        # found" by this truthiness check — confirm traces cannot place
        # a valid switch at t == 0
        if from_time and to_time:
            if from_time < to_time:
                return True

        return False
+
+ def getRuntime(self, window=None, percent=False):
+ """Return the Total Runtime of a task
+
+ :param window: A (start, end) tuple to limit the scope of the
+ residency calculation.
+ :type window: tuple
+
+ :param percent: If True, the result is returned
+ as a percentage of the total execution time
+ of the run.
+ :type percent: bool
+
+ .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertRuntime`
+ """
+
+ agg = self._aggregator(sched_funcs.residency_sum)
+ run_time = agg.aggregate(level="all", window=window)[0]
+
+ if percent:
+
+ if window:
+ begin, end = window
+ total_time = end - begin
+ else:
+ total_time = self._ftrace.get_duration()
+
+ run_time = run_time * 100
+ run_time = run_time / total_time
+
+ return run_time
+
+ def assertRuntime(
+ self,
+ expected_value,
+ operator,
+ window=None,
+ percent=False):
+ """Assert on the total runtime of the task
+
+ :param expected_value: The expected value of the runtime
+ :type expected_value: double
+
+ :param operator: A binary operator function that returns
+ a boolean. For example:
+ ::
+
+ import operator
+ op = operator.ge
+ assertRuntime(expected_value, op)
+
+ Will do the following check:
+ ::
+
+ getRuntime() >= expected_value
+
+ A custom function can also be passed:
+ ::
+
+ THRESHOLD=5
+ def between_threshold(a, expected):
+ return abs(a - expected) <= THRESHOLD
+
+ :type operator: function
+
+ :param window: A (start, end) tuple to limit the scope of the
+ residency calculation.
+ :type window: tuple
+
+ :param percent: If True, the result is returned
+ as a percentage of the total execution time
+ of the run.
+ :type percent: bool
+
+ .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getRuntime`
+ """
+
+ run_time = self.getRuntime(window, percent)
+ return operator(run_time, expected_value)
+
    def getPeriod(self, window=None, align="start"):
        """Return the period of the task in (ms)

        Let's say a task started execution at the following times:

            .. math::

                T_1, T_2, ...T_n

        The period is defined as:

            .. math::

                Median((T_2 - T_1), (T_4 - T_3), ....(T_n - T_{n-1}))

        :param window: A (start, end) tuple to limit the scope of the
            residency calculation.
        :type window: tuple

        :param align:
            :code:`"start"` aligns period calculation to switch-in events
            :code:`"end"` aligns the calculation to switch-out events

            .. note::

                ``align`` is accepted and documented but is not
                forwarded anywhere in this implementation, so it
                currently has no effect on the result
        :type align: str

        .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertPeriod`
        """

        agg = self._aggregator(sched_funcs.period)
        deltas = agg.aggregate(level="all", window=window)[0]

        # No switch deltas in the window: the period is undefined
        if not len(deltas):
            return float("NaN")
        else:
            # Median of the deltas, converted from seconds to ms
            return np.median(deltas) * 1000
+
+ def assertPeriod(
+ self,
+ expected_value,
+ operator,
+ window=None,
+ align="start"):
+ """Assert on the period of the task
+
+ :param expected_value: The expected value of the runtime
+ :type expected_value: double
+
+ :param operator: A binary operator function that returns
+ a boolean. For example:
+ ::
+
+ import operator
+ op = operator.ge
+ assertPeriod(expected_value, op)
+
+ Will do the following check:
+ ::
+
+ getPeriod() >= expected_value
+
+ A custom function can also be passed:
+ ::
+
+ THRESHOLD=5
+ def between_threshold(a, expected):
+ return abs(a - expected) <= THRESHOLD
+
+ :param window: A (start, end) tuple to limit the scope of the
+ calculation.
+ :type window: tuple
+
+ :param align:
+ :code:`"start"` aligns period calculation to switch-in events
+ :code:`"end"` aligns the calculation to switch-out events
+ :type param: str
+
+ .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getPeriod`
+ """
+
+ period = self.getPeriod(window, align)
+ return operator(period, expected_value)
+
+ def getDutyCycle(self, window):
+ """Return the duty cycle of the task
+
+ :param window: A (start, end) tuple to limit the scope of the
+ calculation.
+ :type window: tuple
+
+ Duty Cycle:
+ The percentage of time the task spends executing
+ in the given window of time
+
+ .. math::
+
+ \delta_{cycle} = \\frac{T_{exec} \\times 100}{T_{window}}
+
+ .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertDutyCycle`
+ """
+
+ return self.getRuntime(window, percent=True)
+
    def assertDutyCycle(self, expected_value, operator, window):
        """
        :param expected_value: The expected value of the duty cycle
        :type expected_value: double

        :param operator: A binary operator function that returns
            a boolean. For example:
            ::

                import operator
                op = operator.ge
                assertDutyCycle(expected_value, op, window)

            Will do the following check:
            ::

                getDutyCycle(window) >= expected_value

            A custom function can also be passed:
            ::

                THRESHOLD=5
                def between_threshold(a, expected):
                    return abs(a - expected) <= THRESHOLD

        :param window: A (start, end) tuple to limit the scope of the
            calculation.
        :type window: tuple

        .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getDutyCycle`

        """
        return self.assertRuntime(
            expected_value,
            operator,
            window,
            percent=True)
+
+ def getFirstCpu(self, window=None):
+ """
+ :return: The first CPU the task ran on
+
+ .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.assertFirstCPU`
+ """
+
+ agg = self._aggregator(sched_funcs.first_cpu)
+ result = agg.aggregate(level="cpu", window=window)
+ result = list(itertools.chain.from_iterable(result))
+
+ min_time = min(result)
+ if math.isinf(min_time):
+ return -1
+ index = result.index(min_time)
+ return self._topology.get_node("cpu", index)[0]
+
+ def assertFirstCpu(self, cpus, window=None):
+ """
+ Check if the Task started (first ran on in the duration
+ of the trace) on a particular CPU(s)
+
+ :param cpus: A list of acceptable CPUs
+ :type cpus: int, list
+
+ .. seealso:: :mod:`bart.sched.SchedAssert.SchedAssert.getFirstCPU`
+ """
+
+ first_cpu = self.getFirstCpu(window=window)
+ cpus = Utils.listify(cpus)
+ return first_cpu in cpus
+
    def getLastCpu(self, window=None):
        """Return the last CPU the task ran on

        :param window: A (start, end) tuple to limit the scope of the
            calculation.
        :type window: tuple
        """

        agg = self._aggregator(sched_funcs.last_cpu)
        result = agg.aggregate(level="cpu", window=window)
        result = list(itertools.chain.from_iterable(result))

        end_time = max(result)
        # NOTE(review): unlike getFirstCpu (which tests math.isinf and
        # maps the index through self._topology.get_node), this treats
        # any falsy end_time as "never ran" and returns the raw list
        # index — confirm the asymmetry is intentional
        if not end_time:
            return -1

        return result.index(end_time)
+
+ def generate_events(self, level, start_id=0, window=None):
+ """Generate events for the trace plot
+
+ .. note::
+ This is an internal function accessed by the
+ :mod:`bart.sched.SchedMultiAssert` class for plotting data
+ """
+
+ agg = self._aggregator(sched_funcs.trace_event)
+ result = agg.aggregate(level=level, window=window)
+ events = []
+
+ for idx, level_events in enumerate(result):
+ if not len(level_events):
+ continue
+ events += np.column_stack((level_events, np.full(len(level_events), idx))).tolist()
+
+ return sorted(events, key = lambda x : x[0])
+
+ def plot(self, level="cpu", window=None, xlim=None):
+ """
+ :return: :mod:`trappy.plotter.AbstractDataPlotter` instance
+ Call :func:`view` to draw the graph
+ """
+
+ if not xlim:
+ if not window:
+ xlim = [0, self._ftrace.get_duration()]
+ else:
+ xlim = list(window)
+
+ events = {}
+ events[self.name] = self.generate_events(level, window)
+ names = [self.name]
+ num_lanes = self._topology.level_span(level)
+ lane_prefix = level.upper() + ": "
+ return trappy.EventPlot(events, names, xlim,
+ lane_prefix=lane_prefix,
+ num_lanes=num_lanes)
diff --git a/bart/sched/SchedMatrix.py b/bart/sched/SchedMatrix.py
new file mode 100755
index 0000000..4595469
--- /dev/null
+++ b/bart/sched/SchedMatrix.py
@@ -0,0 +1,292 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+The SchedMatrix provides an ability to compare two executions
+of benchmarks with multiple processes.
+
+For example, consider a benchmark that spawns 4 identical threads
+where any two of the threads should exhibit one behaviour while the
+remaining two exhibit another, different behaviour.
+
+SchedMatrix creates a Matrix of Scheduler Waveform Correlations
+
+A = Reference Execution
+B = Execution to be Evaluated
+
+.. code::
+
+ +---+ +---+
+ | | | |
+ A1, B3 +---+ +--+ +--------------+
+ +---+ +---+
+ | | | |
+ A2, B4 +--------------+ +--+ +---+
+ +---+ +---+
+ | | | |
+ A3, B1 +---+ +--+ +--------------+
+ +---+ +---+
+ | | | |
+ A4, B2 +--------------+ +--+ +---+
+
+
+**Correlation Matrix**
+
+ === ==== ==== ==== ====
+ B1 B2 B3 B4
+ === ==== ==== ==== ====
+ A1 1 0 1 0
+ A2 0 1 0 1
+ A3 1 0 1 0
+ A4 0 1 0 1
+ === ==== ==== ==== ====
+
+
+Thus a success criteria can be defined as A1 having two similar threads in the
+evaluated execution
+::
+
+ assertSiblings(A1, 2, operator.eq)
+ assertSiblings(A2, 2, operator.eq)
+ assertSiblings(A3, 2, operator.eq)
+ assertSiblings(A4, 2, operator.eq)
+"""
+
+
+import sys
+import trappy
+import numpy as np
+from trappy.stats.Aggregator import MultiTriggerAggregator
+from trappy.stats.Correlator import Correlator
+from bart.sched import functions as sched_funcs
+from bart.common import Utils
+
+POSITIVE_TOLERANCE = 0.80
+
+# pylint: disable=invalid-name
+# pylint: disable=too-many-arguments
+
+
+class SchedMatrix(object):
+
+ """
+ :param reference_trace: The trace file path/ftrace object
+ to be used as a reference
+ :type reference_trace: str, :mod:`trappy.ftrace.FTrace`
+
+ :param trace: The trace file path/ftrace object
+ to be verified
+ :type trace: str, :mod:`trappy.ftrace.FTrace`
+
+ :param topology: A topology that describes the arrangement of
+ CPU's on a system. This is useful for multi-cluster systems
+ where data needs to be aggregated at different topological
+ levels
+ :type topology: :mod:`trappy.stats.Topology.Topology`
+
+ :param execnames: The execnames of the task to be analysed
+
+ A single execname or a list of execnames can be passed.
+ There can be multiple processes associated with a single
+ execname parameter. The execnames are searched using a prefix
+ match.
+ :type execnames: list, str
+
+ Consider the following processes which need to be analysed:
+
+ * **Reference Trace**
+
+ ===== ==============
+ PID execname
+ ===== ==============
+ 11 task_1
+ 22 task_2
+ 33 task_3
+ ===== ==============
+
+ * **Trace to be verified**
+
+ ===== ==============
+ PID execname
+ ===== ==============
+ 77 task_1
+ 88 task_2
+ 99 task_3
+ ===== ==============
+
+
+ A :mod:`bart.sched.SchedMatrix.SchedMatrix` instance can be created
+ in the following different ways:
+
+ - Using execname prefix match
+ ::
+
+ SchedMatrix(r_trace, trace, topology,
+ execnames="task_")
+
+ - Individual Task names
+ ::
+
+ SchedMatrix(r_trace, trace, topology,
+ execnames=["task_1", "task_2", "task_3"])
+
+ """
+
+ def __init__(
+ self,
+ reference_trace,
+ trace,
+ topology,
+ execnames,
+ aggfunc=sched_funcs.csum):
+
+ run = Utils.init_ftrace(trace)
+ reference_run = Utils.init_ftrace(reference_trace)
+
+ self._execnames = Utils.listify(execnames)
+ self._reference_pids = self._populate_pids(reference_run)
+ self._pids = self._populate_pids(run)
+ self._dimension = len(self._pids)
+ self._topology = topology
+ self._matrix = self._generate_matrix(run, reference_run, aggfunc)
+
+ if len(self._pids) != len(self._reference_pids):
+ raise RuntimeError(
+ "The runs do not have the same number of PIDs for {0}".format(
+ str(execnames)))
+
+ def _populate_pids(self, run):
+ """Populate the qualifying PIDs from the run"""
+
+ if len(self._execnames) == 1:
+ return sched_funcs.get_pids_for_process(run, self._execnames[0])
+
+ pids = []
+
+ for proc in self._execnames:
+ pids += sched_funcs.get_pids_for_process(run, proc)
+
+ return list(set(pids))
+
    def _generate_matrix(self, run, reference_run, aggfunc):
        """Generate the Correlation Matrix

        :param run: The trace to be evaluated
        :param reference_run: The reference trace
        :param aggfunc: Aggregation function handed to each
            :mod:`trappy.stats.Aggregator.MultiTriggerAggregator`

        :return: an NxN numpy array where entry (i, j) holds the
            correlation of reference task i against evaluated task j
        """

        reference_aggs = []
        aggs = []

        # One aggregator per PID, kept in the same order as the PID lists
        for idx in range(self._dimension):

            reference_aggs.append(
                MultiTriggerAggregator(
                    sched_funcs.sched_triggers(
                        reference_run,
                        self._reference_pids[idx],
                        trappy.sched.SchedSwitch
                    ),
                    self._topology,
                    aggfunc))

            aggs.append(
                MultiTriggerAggregator(
                    sched_funcs.sched_triggers(
                        run,
                        self._pids[idx],
                        trappy.sched.SchedSwitch
                    ),
                    self._topology,
                    aggfunc))

        # Cartesian product: every reference aggregator paired with
        # every evaluated aggregator
        agg_pair_gen = ((r_agg, agg)
                        for r_agg in reference_aggs for agg in aggs)

        # pylint fails to recognize numpy members.
        # pylint: disable=no-member
        matrix = np.zeros((self._dimension, self._dimension))
        # pylint: enable=no-member

        for (ref_result, test_result) in agg_pair_gen:
            # Recover the (i, j) matrix position of this pair
            i = reference_aggs.index(ref_result)
            j = aggs.index(test_result)
            corr = Correlator(
                ref_result,
                test_result,
                corrfunc=sched_funcs.binary_correlate,
                filter_gaps=True)
            _, total = corr.correlate(level="cluster")

            matrix[i][j] = total

        return matrix
+
+ def print_matrix(self):
+ """Print the correlation matrix"""
+
+ # pylint fails to recognize numpy members.
+ # pylint: disable=no-member
+ np.set_printoptions(precision=5)
+ np.set_printoptions(suppress=False)
+ np.savetxt(sys.stdout, self._matrix, "%5.5f")
+ # pylint: enable=no-member
+
+ def getSiblings(self, pid, tolerance=POSITIVE_TOLERANCE):
+ """Return the number of processes in the
+ reference trace that have a correlation
+ greater than tolerance
+
+ :param pid: The PID of the process in the reference
+ trace
+ :type pid: int
+
+ :param tolerance: A correlation value > tolerance
+ will classify the resultant process as a sibling
+ :type tolerance: float
+
+ .. seealso:: :mod:`bart.sched.SchedMatrix.SchedMatrix.assertSiblings`
+ """
+
+ ref_pid_idx = self._reference_pids.index(pid)
+ pid_result = self._matrix[ref_pid_idx]
+ return len(pid_result[pid_result > tolerance])
+
    def assertSiblings(self, pid, expected_value, operator,
                       tolerance=POSITIVE_TOLERANCE):
        """Assert that the number of siblings in the reference
        trace match the expected value and the operator

        :param pid: The PID of the process in the reference
            trace
        :type pid: int

        :param expected_value: The expected number of siblings
        :type expected_value: int

        :param operator: A binary operator function that returns
            a boolean. For example:
            ::

                import operator
                op = operator.eq
                assertSiblings(pid, expected_value, op)

            Will do the following check:
            ::

                getSiblings(pid) == expected_value

        :param tolerance: A correlation value > tolerance
            will classify the resultant process as a sibling
        :type tolerance: float

        .. seealso:: :mod:`bart.sched.SchedMatrix.SchedMatrix.getSiblings`
        """
        num_siblings = self.getSiblings(pid, tolerance)
        return operator(num_siblings, expected_value)
diff --git a/bart/sched/SchedMultiAssert.py b/bart/sched/SchedMultiAssert.py
new file mode 100755
index 0000000..32ea17d
--- /dev/null
+++ b/bart/sched/SchedMultiAssert.py
@@ -0,0 +1,299 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""A library for asserting scheduler scenarios based on the
+statistics aggregation framework"""
+
+import re
+import inspect
+import trappy
+from bart.sched import functions as sched_funcs
+from bart.sched.SchedAssert import SchedAssert
+from bart.common import Utils
+
+class SchedMultiAssert(object):
+ """This is vector assertion class built on top of
+ :mod:`bart.sched.SchedAssert.SchedAssert`
+
+ :param ftrace: A single trappy.FTrace object
+ or a path that can be passed to trappy.FTrace
+ :type ftrace: :mod:`trappy.ftrace.FTrace`
+
+ :param topology: A topology that describes the arrangement of
+ CPU's on a system. This is useful for multi-cluster systems
+ where data needs to be aggregated at different topological
+ levels
+ :type topology: :mod:`trappy.stats.Topology.Topology`
+
+ :param execnames: The execnames of the task to be analysed
+
+ A single execname or a list of execnames can be passed.
+ There can be multiple processes associated with a single
+ execname parameter. The execnames are searched using a prefix
+ match.
+ :type execname: list, str
+
+ :param pids: The process IDs of the tasks to be analysed
+ :type pids: list, int
+
+ Consider the following processes which need to be analysed
+
+ ===== ==============
+ PID execname
+ ===== ==============
+ 11 task_1
+ 22 task_2
+ 33 task_3
+ ===== ==============
+
+ A :mod:`bart.sched.SchedMultiAssert.SchedMultiAssert` instance be created
+ following different ways:
+
+ - Using execname prefix match
+ ::
+
+ SchedMultiAssert(ftrace, topology, execnames="task_")
+
+ - Individual Task names
+ ::
+
+ SchedMultiAssert(ftrace, topology, execnames=["task_1", "task_2", "task_3"])
+
+ - Using Process IDs
+ ::
+
+ SchedMultiAssert(ftrace, topology, pids=[11, 22, 33])
+
+
+ All the functionality provided in :mod:`bart.sched.SchedAssert.SchedAssert` is available
+ in this class with the addition of handling vector assertions.
+
+ For example consider the use of :func:`getDutyCycle`
+ ::
+
+ >>> s = SchedMultiAssert(ftrace, topology, execnames="task_")
+ >>> s.getDutyCycle(window=(start, end))
+ {
+ "11": {
+ "task_name": "task_1",
+ "dutycycle": 10.0
+ },
+ "22": {
+ "task_name": "task_2",
+ "dutycycle": 20.0
+ },
+ "33": {
+ "task_name": "task_3",
+ "dutycycle": 30.0
+ },
+ }
+
+ The assertions can be used in a similar way
+ ::
+
+ >>> import operator as op
+ >>> s = SchedMultiAssert(ftrace, topology, execnames="task_")
+ >>> s.assertDutyCycle(15, op.ge, window=(start, end))
+ {
+ "11": {
+ "task_name": "task_1",
+ "dutycycle": False
+ },
+ "22": {
+ "task_name": "task_2",
+ "dutycycle": True
+ },
+ "33": {
+ "task_name": "task_3",
+ "dutycycle": True
+ },
+ }
+
+ The above result can be coalesced using a :code:`rank` parameter
+ As we know that only 2 processes have duty cycles greater than 15%
+ we can do the following:
+ ::
+
+ >>> import operator as op
+ >>> s = SchedMultiAssert(ftrace, topology, execnames="task_")
+ >>> s.assertDutyCycle(15, op.ge, window=(start, end), rank=2)
+ True
+
+ See :mod:`bart.sched.SchedAssert.SchedAssert` for the available
+ functionality
+ """
+
+ def __init__(self, ftrace, topology, execnames=None, pids=None):
+
+ self._ftrace = Utils.init_ftrace(ftrace)
+ self._topology = topology
+
+ if execnames and pids:
+ raise ValueError('Either pids or execnames must be specified')
+ if execnames:
+ self._execnames = Utils.listify(execnames)
+ self._pids = self._populate_pids()
+ elif pids:
+ self._pids = pids
+ else:
+ raise ValueError('One of PIDs or execnames must be specified')
+
+ self._asserts = self._populate_asserts()
+ self._populate_methods()
+
+ def _populate_asserts(self):
+ """Populate SchedAsserts for the PIDs"""
+
+ asserts = {}
+
+ for pid in self._pids:
+ asserts[pid] = SchedAssert(self._ftrace, self._topology, pid=pid)
+
+ return asserts
+
+ def _populate_pids(self):
+ """Map the input execnames to PIDs"""
+
+ if len(self._execnames) == 1:
+ return sched_funcs.get_pids_for_process(self._ftrace, self._execnames[0])
+
+ pids = []
+
+ for proc in self._execnames:
+ pids += sched_funcs.get_pids_for_process(self._ftrace, proc)
+
+ return list(set(pids))
+
+ def _create_method(self, attr_name):
+ """A wrapper function to create a dispatch function"""
+
+ return lambda *args, **kwargs: self._dispatch(attr_name, *args, **kwargs)
+
+ def _populate_methods(self):
+ """Populate Methods from SchedAssert"""
+
+ for attr_name in dir(SchedAssert):
+ attr = getattr(SchedAssert, attr_name)
+
+ valid_method = attr_name.startswith("get") or \
+ attr_name.startswith("assert")
+ if inspect.ismethod(attr) and valid_method:
+ func = self._create_method(attr_name)
+ setattr(self, attr_name, func)
+
    def get_task_name(self, pid):
        """Get task name for the PID

        :param pid: one of the tracked PIDs
        :type pid: int

        :return: the execname recorded by the per-PID SchedAssert
        """
        return self._asserts[pid].execname
+
+
    def _dispatch(self, func_name, *args, **kwargs):
        """The dispatch function to call into the SchedAssert
        Method

        :param func_name: name of the SchedAssert method invoked for
            every tracked PID

        Control keyword arguments (consumed here, not forwarded):

        - ``rank``: for assert* methods, coalesce the per-PID booleans
          into a single bool that is True when exactly ``rank`` PIDs
          satisfied the assertion
        - ``result``: an existing dict to merge per-PID results into
        - ``param``: key under which each PID's value is stored;
          defaults to the method name with its leading assert/get
          prefix stripped, lower-cased
        """

        assert_func = func_name.startswith("assert")
        num_true = 0

        # Pop control kwargs before forwarding the remainder to the
        # underlying SchedAssert method.
        rank = kwargs.pop("rank", None)
        result = kwargs.pop("result", {})
        param = kwargs.pop("param", re.sub(r"assert|get", "", func_name, count=1).lower())

        for pid in self._pids:

            if pid not in result:
                result[pid] = {}
                result[pid]["task_name"] = self.get_task_name(pid)

            attr = getattr(self._asserts[pid], func_name)
            result[pid][param] = attr(*args, **kwargs)

            if assert_func and result[pid][param]:
                num_true += 1

        # rank coalesces only assertion results; getters always return
        # the full per-PID dict.
        if assert_func and rank:
            return num_true == rank
        else:
            return result
+
+ def getCPUBusyTime(self, level, node, window=None, percent=False):
+ """Get the amount of time the cpus in the system were busy executing the
+ tasks
+
+ :param level: The topological level to which the group belongs
+ :type level: string
+
+ :param node: The group of CPUs for which to calculate busy time
+ :type node: list
+
+ :param window: A (start, end) tuple to limit the scope of the
+ calculation.
+ :type window: tuple
+
+ :param percent: If True the result is normalized to the total
+ time of the period, either the window or the full lenght of
+ the trace.
+ :type percent: bool
+
+ .. math::
+
+ R = \\frac{T_{busy} \\times 100}{T_{total}}
+
+ """
+ residencies = self.getResidency(level, node, window=window)
+
+ busy_time = sum(v["residency"] for v in residencies.itervalues())
+
+ if percent:
+ if window:
+ total_time = window[1] - window[0]
+ else:
+ total_time = self._ftrace.get_duration()
+ num_cpus = len(node)
+ return busy_time / (total_time * num_cpus) * 100
+ else:
+ return busy_time
+
+ def generate_events(self, level, window=None):
+ """Generate Events for the trace plot
+
+ .. note::
+ This is an internal function for plotting data
+ """
+
+ events = {}
+ for s_assert in self._asserts.values():
+ events[s_assert.name] = s_assert.generate_events(level, window=window)
+
+ return events
+
    def plot(self, level="cpu", window=None, xlim=None):
        """Plot the residency of all tracked tasks as lanes of an
        EventPlot.

        :param level: topological level whose span determines the number
            of lanes (defaults to "cpu")
        :param window: (start, end) tuple restricting the plotted events
        :param xlim: explicit x-axis limits; defaults to the window, or
            to the whole trace duration when no window is given

        :return: :mod:`trappy.plotter.AbstractDataPlotter` instance
            Call :func:`view` to draw the graph
        """

        if not xlim:
            if not window:
                xlim = [0, self._ftrace.get_duration()]
            else:
                xlim = list(window)

        events = self.generate_events(level, window)
        names = [s.name for s in self._asserts.values()]
        num_lanes = self._topology.level_span(level)
        lane_prefix = level.upper() + ": "
        return trappy.EventPlot(events, names, xlim,
                                lane_prefix=lane_prefix,
                                num_lanes=num_lanes)
diff --git a/bart/sched/__init__.py b/bart/sched/__init__.py
new file mode 100644
index 0000000..68133da
--- /dev/null
+++ b/bart/sched/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Initialization for bart.sched"""
+
+
+from bart.sched import SchedAssert
+from bart.sched import SchedMultiAssert
+from bart.sched import SchedMatrix
diff --git a/bart/sched/functions.py b/bart/sched/functions.py
new file mode 100644
index 0000000..d1b17d4
--- /dev/null
+++ b/bart/sched/functions.py
@@ -0,0 +1,621 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Scheduler specific Functionality for the
+stats framework
+
+The Scheduler stats aggregation is based on a signal
+which is generated by the combination of two triggers
+from the events with the following parameters
+
+========================= ============ =============
+EVENT VALUE FILTERS
+========================= ============ =============
+:func:`sched_switch` 1 next_pid
+:func:`sched_switch` -1 prev_pid
+========================= ============ =============
+
+Both these Triggers are provided by the event
+:mod:`trappy.sched.SchedSwitch` which correspond to
+the :code:`sched_switch` unique word in the trace
+
+.. seealso:: :mod:`trappy.stats.Trigger.Trigger`
+
+Using the above information the following signals are
+generated.
+
+**EVENT SERIES**
+
+This is a combination of the two triggers as specified
+above and has alternating +/- 1 values and is merely
+a representation of the position in time when the process
+started or stopped running on a CPU
+
+**RESIDENCY SERIES**
+
+This series is a cumulative sum of the event series and
+is a representation of the continuous residency of the
+process on a CPU
+
+The pivot for the aggregators is the CPU on which the
+event occurred on. If N is the number of CPUs in the
+system, N signal for each CPU are generated. These signals
+can then be aggregated by specifying a Topology
+
+.. seealso:: :mod:`trappy.stats.Topology.Topology`
+"""
+
+import numpy as np
+from trappy.stats.Trigger import Trigger
+
+WINDOW_SIZE = 0.0001
+"""A control config for filter events. Some analyses
+may require ignoring of small interruptions"""
+
+# Trigger Values
+SCHED_SWITCH_IN = 1
+"""Value of the event when a task is **switch in**
+or scheduled on a CPU"""
+SCHED_SWITCH_OUT = -1
+"""Value of the event when a task is **switched out**
+or relinquishes a CPU"""
+NO_EVENT = 0
+"""Signifies no event on an event trace"""
+
+# Field Names
+CPU_FIELD = "__cpu"
+"""The column in the sched_switch event that
+indicates the CPU on which the event occurred
+"""
+NEXT_PID_FIELD = "next_pid"
+"""The column in the sched_switch event that
+indicates the PID of the next process to be scheduled
+"""
+PREV_PID_FIELD = "prev_pid"
+"""The column in the sched_switch event that
+indicates the PID of the process that was scheduled
+in
+"""
TASK_RUNNING = 1
"""In a residency series, a one indicates
that the task is running
"""
TASK_NOT_RUNNING = 0
"""In a residency series, a zero indicates
that the task is not running
"""
+TIME_INVAL = -1
+"""Standard Value to indicate invalid time data"""
+SERIES_SANTIZED = "_sched_sanitized"
+"""A memoized flag which is set when an event series
+is checked for boundary conditions
+"""
+
+
def sanitize_asymmetry(series, window=None):
    """Sanitize the cases when a :code:`SWITCH_OUT`
    happens before a :code:`SWITCH_IN`. (The case when
    a process is already running before the trace started)

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    .. note:: the series is modified in place; a memoized attribute
        flag marks it so the fix-up only runs once per series object
    """

    if not hasattr(series, SERIES_SANTIZED):

        events = series[series != 0]
        if len(series) >= 2 and len(events):
            # Trace begins mid-execution: neutralize or convert a
            # leading SWITCH_OUT so events alternate IN/OUT.
            if series.values[0] == SCHED_SWITCH_OUT:
                series.values[0] = TASK_NOT_RUNNING

            elif events.values[0] == SCHED_SWITCH_OUT:
                # First sample is a non-event: repurpose it as the
                # missing SWITCH_IN, snapped to the window start.
                series.values[0] = SCHED_SWITCH_IN
                if window:
                    series.index.values[0] = window[0]

            # Mirror fix-up for a trailing SWITCH_IN with no
            # matching SWITCH_OUT (trace ends mid-execution).
            if series.values[-1] == SCHED_SWITCH_IN:
                series.values[-1] = TASK_NOT_RUNNING

            elif events.values[-1] == SCHED_SWITCH_IN:
                series.values[-1] = SCHED_SWITCH_OUT
                if window:
                    series.index.values[-1] = window[1]

        # No point if the series just has one value and
        # one event. We do not have sufficient data points
        # for any calculation. We should Ideally never reach
        # here.
        elif len(series) == 1:
            series.values[0] = 0

        setattr(series, SERIES_SANTIZED, True)

    return series
+
+
def csum(series, window=None, filter_gaps=False):
    """:func:`aggfunc` computing the cumulative sum of the input
    series data, optionally restricted to a time window

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :param filter_gaps: If set, a process being switched out
        for less than :mod:`bart.sched.functions.WINDOW_SIZE` is
        ignored. This is helpful when small interruptions need
        to be ignored to compare overall correlation
    :type filter_gaps: bool
    """

    data = filter_small_gaps(series) if filter_gaps else series
    return select_window(data.cumsum(), window)
+
def filter_small_gaps(series):
    """A helper function that removes switch-out/switch-in pairs
    closer together than :mod:`bart.sched.functions.WINDOW_SIZE`
    from a residency event series

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    .. note:: the series is modified in place and also returned
    """

    start = None
    for index, value in series.iteritems():

        if value == SCHED_SWITCH_IN:
            # A switch-in with no preceding switch-out (e.g. at the
            # start of the trace) cannot form a gap.
            # Bug fix: identity comparison (is None) instead of == None.
            if start is None:
                continue

            if index - start < WINDOW_SIZE:
                # Erase both ends of the short gap so the task appears
                # continuously running across it.
                series[start] = NO_EVENT
                series[index] = NO_EVENT
            start = None

        if value == SCHED_SWITCH_OUT:
            start = index

    return series
+
def first_cpu(series, window=None):
    """:func:`aggfunc` returning, as a single-element list, the
    timestamp of the earliest switch-in event in the series.

    The unit-length vector form lets the value be aggregated and
    reduced across topology nodes to find the first CPU of a task;
    [inf] is returned when no switch-in exists so that any real
    timestamp wins the reduction.

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple
    """

    switch_ins = select_window(series, window)
    switch_ins = switch_ins[switch_ins == SCHED_SWITCH_IN]

    if len(switch_ins):
        return [switch_ins.index.values[0]]
    return [float("inf")]
+
def last_cpu(series, window=None):
    """:func:`aggfunc` returning, as a single-element list, the
    timestamp of the latest switch-out event in the series.

    The unit-length vector form lets the value be aggregated and
    reduced across topology nodes to find the last CPU of a task;
    [0] is returned when no switch-out exists so that any real
    timestamp wins the reduction.

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple
    """

    switch_outs = select_window(series, window)
    switch_outs = switch_outs[switch_outs == SCHED_SWITCH_OUT]

    if len(switch_outs):
        return [switch_outs.index.values[-1]]
    return [0]
+
def select_window(series, window):
    """Return the slice of *series* whose index lies inside *window*
    (inclusive on both ends), or the series itself when no window is
    given

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A (start, stop) tuple indicating a time window
    :type window: tuple
    """

    if not window:
        return series

    start, stop = window
    index = series.index
    return series[(index >= start) & (index <= stop)]
+
def residency_sum(series, window=None):
    """:func:`aggfunc` to calculate the total
    residency


    The input series is processed for
    intervals between a :mod:`bart.sched.functions.SCHED_SWITCH_IN`
    and :mod:`bart.sched.functions.SCHED_SWITCH_OUT` to track
    additive residency of a task

    .. math::

        S_{in} = i_{1}, i_{2}...i_{N} \\\\
        S_{out} = o_{1}, o_{2}...o_{N} \\\\
        R_{total} = \sum_{k}^{N}\Delta_k = \sum_{k}^{N}(o_{k} - i_{k})

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return: A scalar float value
    """

    if not len(series):
        return 0.0

    org_series = series
    series = select_window(series, window)
    series = sanitize_asymmetry(series, window)

    s_in = series[series == SCHED_SWITCH_IN]
    s_out = series[series == SCHED_SWITCH_OUT]

    if not (len(s_in) and len(s_out)):
        # No paired switch events inside the window: the task may have
        # been running across the whole window.  Check the cumulative
        # running state at the window boundaries; best effort, so any
        # failure (e.g. window is None) falls through to the length
        # check below.
        try:
            org_series = sanitize_asymmetry(org_series)
            running = select_window(org_series.cumsum(), window)
            if running.values[0] == TASK_RUNNING and running.values[-1] == TASK_RUNNING:
                return window[1] - window[0]
        except Exception:
            # Bug fix: "except Exception, e" is Python-2-only syntax and
            # bound a name that was never used.
            pass

    if len(s_in) != len(s_out):
        raise RuntimeError(
            "Unexpected Lengths: s_in={}, s_out={}".format(
                len(s_in),
                len(s_out)))
    else:
        return np.sum(s_out.index.values - s_in.index.values)
+
+
def first_time(series, value, window=None):
    """:func:`aggfunc` returning, as a unit-length vector, the first
    index at which ``series == value``; [inf] when no such index
    exists

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param value: the value to search for

    :param window: A tuple indicating a time window
    :type window: tuple

    :return: A vector of Unit Length
    """

    matches = select_window(series, window)
    matches = matches[matches == value]

    if len(matches):
        return [matches.index.values[0]]
    return [float("inf")]
+
+
def period(series, align="start", window=None):
    """This :func:`aggfunc` returns a list of the
    durations between two successive triggers:

    - When :code:`align=start` the :code:`SCHED_IN`
      trigger is used

    - When :code:`align=end` the :code:`SCHED_OUT`
      trigger is used


    .. math::

        E = e_{1}, e_{2}...e_{N} \\\\
        T_p = \\frac{\sum_{j}^{\lfloor N/2 \\rfloor}(e_{2j + 1} - e_{2j})}{N}

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return:
        A list of deltas of successive starts/stops
        of a task

    """

    series = select_window(series, window)
    series = sanitize_asymmetry(series, window)

    if align == "start":
        series = series[series == SCHED_SWITCH_IN]
    elif align == "end":
        series = series[series == SCHED_SWITCH_OUT]

    # NOTE(review): truncating to a single element when the event count
    # is even makes np.diff return an empty list below; this looks like
    # a typo for series[:-1] (drop the unpaired trailing event) -- confirm
    # intent before changing behavior.
    if len(series) % 2 == 0:
        series = series[:1]

    if not len(series):
        return []

    return list(np.diff(series.index.values))
+
def last_time(series, value, window=None):
    """:func:`aggfunc` to:

    - Return the last index where the
      series == value

    - If no such index is found
      :mod:`bart.sched.functions.TIME_INVAL`
      is returned

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return: A vector of Unit Length
    """

    series = select_window(series, window)
    series = series[series == value]
    if not len(series):
        return [TIME_INVAL]

    return [series.index.values[-1]]
+
+
def binary_correlate(series_x, series_y):
    """Helper function to Correlate binary Data

    Both the series should have same indices

    For binary time series data:

    .. math::

        \\alpha_{corr} = \\frac{N_{agree} - N_{disagree}}{N}

    :param series_x: First time Series data
    :type series_x: :mod:`pandas.Series`

    :param series_y: Second time Series data
    :type series_y: :mod:`pandas.Series`

    :raises ValueError: when the two series differ in length
    """

    if len(series_x) != len(series_y):
        # Bug fix: the original message used a line continuation inside
        # the string literal, embedding a run of indentation spaces in
        # the user-visible error text.
        raise ValueError(
            "Cannot compute binary correlation for unequal vectors")

    agree = len(series_x[series_x == series_y])
    disagree = len(series_x[series_x != series_y])

    return (agree - disagree) / float(len(series_x))
+
def get_pids_for_process(ftrace, execname, cls=None):
    """Get the PIDs for a given process

    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param execname: The name of the process (matched exactly
        against the next_comm field)
    :type execname: str

    :param cls: The SchedSwitch event class (required if
        a different event is to be used)
    :type cls: :mod:`trappy.base.Base`

    :return: A list of the unique PIDs seen for the execname

    :raises ValueError: when no SchedSwitch event data is present
    """

    if not cls:
        try:
            df = ftrace.sched_switch.data_frame
        except AttributeError:
            raise ValueError("SchedSwitch event not found in ftrace")

        if len(df) == 0:
            raise ValueError("SchedSwitch event not found in ftrace")
    else:
        event = getattr(ftrace, cls.name)
        df = event.data_frame

    # Vectorized equality instead of a row-wise apply() with a
    # redundant "True if ... else False" lambda: same mask, one
    # C-level pass over the column.
    mask = df["next_comm"] == execname
    return list(np.unique(df[mask]["next_pid"].values))
+
def get_task_name(ftrace, pid, cls=None):
    """Returns the execname for pid

    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param pid: The PID of the process
    :type pid: int

    :param cls: The SchedSwitch event class (required if
        a different event is to be used)
    :type cls: :mod:`trappy.base.Base`

    :return: The execname for the PID; an empty string when the PID
        never appears as next_pid

    :raises ValueError: when no SchedSwitch event data is present
    """

    if not cls:
        try:
            df = ftrace.sched_switch.data_frame
        except AttributeError:
            raise ValueError("SchedSwitch event not found in ftrace")
    else:
        df = getattr(ftrace, cls.name).data_frame

    matching = df[df["next_pid"] == pid]
    if len(matching):
        return matching["next_comm"].values[0]
    return ""
+
def sched_triggers(ftrace, pid, sched_switch_class):
    """Returns the list of sched_switch triggers

    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param pid: The PID of the associated process
    :type pid: int

    :param sched_switch_class: The SchedSwitch event class
    :type sched_switch_class: :mod:`trappy.base.Base`

    :return: List of triggers, such that
        ::

            triggers[0] = switch_in_trigger
            triggers[1] = switch_out_trigger

    :raises ValueError: when the ftrace has no sched_switch event
    """

    if not hasattr(ftrace, "sched_switch"):
        raise ValueError("SchedSwitch event not found in ftrace")

    return [
        sched_switch_in_trigger(ftrace, pid, sched_switch_class),
        sched_switch_out_trigger(ftrace, pid, sched_switch_class),
    ]
+
def sched_switch_in_trigger(ftrace, pid, sched_switch_class):
    """Build the switch-in trigger for one PID.

    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param pid: The PID of the associated process
    :type pid: int

    :param sched_switch_class: The SchedSwitch event class
    :type sched_switch_class: :mod:`trappy.base.Base`

    :return: :mod:`trappy.stats.Trigger.Trigger` on
        the SchedSwitch: IN for the given PID
    """

    filters = {NEXT_PID_FIELD: pid}

    return Trigger(ftrace,
                   sched_switch_class,   # trappy Event Class
                   filters,              # Filter Dictionary
                   SCHED_SWITCH_IN,      # Trigger Value
                   CPU_FIELD)            # Primary Pivot
+
def sched_switch_out_trigger(ftrace, pid, sched_switch_class):
    """Build the switch-out trigger for one PID.

    :param ftrace: A ftrace object with a sched_switch
        event
    :type ftrace: :mod:`trappy.ftrace.FTrace`

    :param pid: The PID of the associated process
    :type pid: int

    :param sched_switch_class: The SchedSwitch event class
    :type sched_switch_class: :mod:`trappy.base.Base`

    :return: :mod:`trappy.stats.Trigger.Trigger` on
        the SchedSwitch: OUT for the given PID
    """

    filters = {PREV_PID_FIELD: pid}

    return Trigger(ftrace,
                   sched_switch_class,   # trappy Event Class
                   filters,              # Filter Dictionary
                   SCHED_SWITCH_OUT,     # Trigger Value
                   CPU_FIELD)            # Primary Pivot
+
+
def trace_event(series, window=None):
    """
    :func:`aggfunc` to be used for plotting
    the process residency data using
    :mod:`trappy.plotter.EventPlot`

    :param series: Input Time Series data
    :type series: :mod:`pandas.Series`

    :param window: A tuple indicating a time window
    :type window: tuple

    :return: A 2-column array (built with np.column_stack) of
        [start_time, stop_time] rows:
        ::

            [
                [start_time_1, stop_time_1],
                [start_time_2, stop_time_2],
                #
                #
                [start_time_N, stop_time_N],
            ]

        NOTE(review): when there are no switch-in events an empty
        *list* is returned instead of an empty array; callers appear
        to only iterate the result -- confirm before relying on the
        return type.
    """
    rects = []
    series = select_window(series, window)
    series = sanitize_asymmetry(series, window)

    s_in = series[series == SCHED_SWITCH_IN]
    s_out = series[series == SCHED_SWITCH_OUT]

    if not len(s_in):
        return rects

    # After sanitization every switch-in should pair with a switch-out.
    if len(s_in) != len(s_out):
        raise RuntimeError(
            "Unexpected Lengths: s_in={}, s_out={}".format(
                len(s_in),
                len(s_out)))

    return np.column_stack((s_in.index.values, s_out.index.values))
diff --git a/bart/thermal/ThermalAssert.py b/bart/thermal/ThermalAssert.py
new file mode 100644
index 0000000..d0ffa78
--- /dev/null
+++ b/bart/thermal/ThermalAssert.py
@@ -0,0 +1,135 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""A thermal specific library to assert certain thermal
+behaviours
+"""
+
+from bart.common import Utils
+from bart.common.Analyzer import Analyzer
+import numpy as np
+
+
+# pylint: disable=invalid-name
+# pylint: disable=too-many-arguments
+class ThermalAssert(object):
+
+ """A class that accepts a TRAPpy FTrace object and
+ provides assertions for thermal behaviours
+
+ :param ftrace: A path to the trace file or a TRAPpy FTrace object
+ :type ftrace: str, :mod:`trappy.ftrace.FTrace`
+ """
+
    def __init__(self, ftrace, config=None):
        """
        :param ftrace: path or FTrace object, normalized via
            Utils.init_ftrace
        :param config: optional grammar config forwarded to the
            :class:`Analyzer`
        """

        self._ftrace = Utils.init_ftrace(ftrace)
        self._analyzer = Analyzer(self._ftrace, config)
+
    def getThermalResidency(self, temp_range, window, percent=False):
        """Return the total time spent in a given temperature range

        :param temp_range: A tuple of (low_temp, high_temp)
            which specifies the range of temperature that
            one intends to calculate the residency for.
        :type temp_range: tuple

        :param window: A (start, end) tuple to limit the scope of the
            residency calculation.
        :type window: tuple

        :param percent: Returns the residency as a percentage of the total
            duration of the trace
        :type percent: bool

        :return: dict mapping each thermal pivot to its in-range
            residency (seconds, or percent of the trace duration)

        .. seealso:

            :mod:`bart.thermal.ThermalAssert.ThermalAssert.assertThermalResidency`
        """

        # Get a pivoted thermal temperature data using the grammar
        data = self._analyzer.getStatement("trappy.thermal.Thermal:temp")

        result = {}
        for pivot, data_frame in data.groupby(axis=1, level=0):

            series = data_frame[pivot]
            series = Utils.select_window(series, window)
            mask = (series >= temp_range[0]) & (series <= temp_range[1])
            index = series.index.values
            # pylint fails to recognize numpy members.
            # pylint: disable=no-member
            shift_index = np.roll(index, 1)
            # pylint: enable=no-member
            # First sample has no predecessor; assumes the trace
            # timeline starts at 0 -- TODO confirm.
            shift_index[0] = 0

            # Sum the per-sample dt for every sample whose temperature
            # fell inside the range.
            result[pivot] = sum((index - shift_index)[mask.values])

            if percent:
                result[pivot] = (
                    result[pivot] * 100.0) / self._ftrace.get_duration()

        return result
+
    def assertThermalResidency(
            self,
            expected_value,
            operator,
            temp_range,
            window,
            percent=False):
        """
        :param expected_value: The expected value of the residency
        :type expected_value: double

        :param operator: A binary operator function that returns
            a boolean. For example:
            ::

                import operator
                op = operator.ge
                assertThermalResidency(temp_range, expected_value, op)

            Will do the following check:
            ::

                getThermalResidency(temp_range) >= expected_value

            A custom function can also be passed:
            ::

                THRESHOLD=5
                def between_threshold(a, expected):
                    return abs(a - expected) <= THRESHOLD

        :param temp_range: A tuple of (low_temp, high_temp)
            which specifies the range of temperature that
            one intends to calculate the residency for.
        :type temp_range: tuple

        :param window: A (start, end) tuple to limit the scope of the
            residency calculation.
        :type window: tuple

        :param percent: Returns the residency as a percentage of the total
            duration of the trace
        :type percent: bool

        .. seealso:

            :mod:`bart.thermal.ThermalAssert.ThermalAssert.getThermalResidency`
        """

        residency = self.getThermalResidency(temp_range, window, percent)
        return operator(residency, expected_value)
diff --git a/bart/thermal/__init__.py b/bart/thermal/__init__.py
new file mode 100644
index 0000000..6eefbc2
--- /dev/null
+++ b/bart/thermal/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Initialization for bart.thermal"""
+
+
+import bart.thermal.ThermalAssert
diff --git a/bart/version.py b/bart/version.py
new file mode 100644
index 0000000..c071e8d
--- /dev/null
+++ b/bart/version.py
@@ -0,0 +1,16 @@
+# Copyright 2016-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+__version__ = "1.8.0"
diff --git a/docs/api_reference/.gitignore b/docs/api_reference/.gitignore
new file mode 100644
index 0000000..588039e
--- /dev/null
+++ b/docs/api_reference/.gitignore
@@ -0,0 +1,3 @@
+_build
+*.rst
+!index.rst
diff --git a/docs/api_reference/Makefile b/docs/api_reference/Makefile
new file mode 100644
index 0000000..f7c61a3
--- /dev/null
+++ b/docs/api_reference/Makefile
@@ -0,0 +1,196 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+ ls *.rst | grep -v index.rst | xargs rm -f
+
+reference:
+ sphinx-apidoc -f -e -o . ../../bart
+
+html: reference
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml: reference
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml: reference
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle: reference
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json: reference
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp: reference
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp: reference
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/BART.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/BART.qhc"
+
+applehelp: reference
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+devhelp: reference
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/BART"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/BART"
+ @echo "# devhelp"
+
+epub: reference
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex: reference
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf: reference
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja: reference
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text: reference
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man: reference
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo: reference
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info: reference
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext: reference
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes: reference
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck: reference
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest: reference
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage: reference
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+xml: reference
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml: reference
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py
new file mode 100644
index 0000000..32d6653
--- /dev/null
+++ b/docs/api_reference/conf.py
@@ -0,0 +1,381 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# BART documentation build configuration file, created by
+# sphinx-quickstart on Fri Sep 4 11:30:35 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+this_dir = os.path.dirname(__file__)
+sys.path.insert(0, os.path.join(this_dir, '../..'))
+import bart
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.mathjax',
+ 'sphinx.ext.ifconfig',
+ 'sphinx.ext.viewcode']
+
+# Update MathJax path to use the cdnjs using HTTPS
+mathjax_path = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'BART'
+copyright = u'2016, ARM Ltd.'
+author = u'Kapileshwar Singh(KP), Javi Merino'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version. Drop everything after the last "."
+version = bart.__version__[:bart.__version__.rindex(".")]
+# The full version, including alpha/beta/rc tags.
+release = bart.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = 'en'
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'classic'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'BARTdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'BART.tex', u'BART Documentation',
+ u'Kapileshwar Singh(KP), Javi Merino', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'bart', u'BART Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'BART', u'BART Documentation',
+ author, 'BART', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# -- Options for Epub output ----------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+epub_author = author
+epub_publisher = author
+epub_copyright = copyright
+
+# The basename for the epub file. It defaults to the project name.
+#epub_basename = project
+
+# The HTML theme for the epub output. Since the default themes are not optimized
+# for small screen space, using the same theme for HTML and epub output is
+# usually not wise. This defaults to 'epub', a theme designed to save visual
+# space.
+#epub_theme = 'epub'
+
+# The language of the text. It defaults to the language option
+# or 'en' if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# A tuple containing the cover image and cover page html template filenames.
+#epub_cover = ()
+
+# A sequence of (type, uri, title) tuples for the guide element of content.opf.
+#epub_guide = ()
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+# Choose between 'default' and 'includehidden'.
+#epub_tocscope = 'default'
+
+# Fix unsupported image types using the Pillow.
+#epub_fix_images = False
+
+# Scale large images.
+#epub_max_image_width = 0
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#epub_show_urls = 'inline'
+
+# If false, no index is generated.
+#epub_use_index = True
diff --git a/docs/api_reference/index.rst b/docs/api_reference/index.rst
new file mode 100644
index 0000000..f21d055
--- /dev/null
+++ b/docs/api_reference/index.rst
@@ -0,0 +1,22 @@
+.. BART documentation master file, created by
+ sphinx-quickstart on Fri Sep 4 12:40:17 2015.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to BART's documentation!
+==================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 4
+
+ bart
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/examples/thermal.py b/docs/examples/thermal.py
new file mode 100644
index 0000000..8fe3e95
--- /dev/null
+++ b/docs/examples/thermal.py
@@ -0,0 +1,87 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+An example file for usage of Analyzer for thermal assertions
+"""
+from bart.common.Analyzer import Analyzer
+from trappy.stats.Topology import Topology
+import unittest
+import trappy
+
+
+class TestThermal(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ # We can run a workload invocation script here
+ # Which then copies the required traces for analysis to
+ # the host.
+ trace_file = "update_a_trace_path_here"
+ ftrace = trappy.FTrace(trace_file, "test_run")
+
+ # Define the parameters that you intend to use in the grammar
+ config = {}
+ config["THERMAL"] = trappy.thermal.Thermal
+ config["OUT"] = trappy.cpu_power.CpuOutPower
+ config["IN"] = trappy.cpu_power.CpuInPower
+ config["PID"] = trappy.pid_controller.PIDController
+ config["GOVERNOR"] = trappy.thermal.ThermalGovernor
+ config["CONTROL_TEMP"] = 77000
+ config["SUSTAINABLE_POWER"] = 2500
+ config["EXPECTED_TEMP_QRT"] = 95
+ config["EXPECTED_STD_PCT"] = 5
+
+ # Define a Topology
+ cls.BIG = '000000f0'
+ cls.LITTLE = '0000000f'
+ cls.tz = 0
+ cls.analyzer = Analyzer(ftrace, config)
+
+ def test_temperature_quartile(self):
+ """Assert Temperature quartile"""
+
+ self.assertTrue(self.analyzer.assertStatement(
+ "numpy.percentile(THERMAL:temp, EXPECTED_TEMP_QRT) < (CONTROL_TEMP + 5000)"))
+
+ def test_average_temperature(self):
+ """Assert Average temperature"""
+
+ self.assertTrue(self.analyzer.assertStatement(
+ "numpy.mean(THERMAL:temp) < CONTROL_TEMP", select=self.tz))
+
+ def test_temp_stdev(self):
+ """Assert StdDev(temp) as % of mean"""
+
+ self.assertTrue(self.analyzer.assertStatement(
+ "(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\
+ < EXPECTED_STD_PCT", select=self.tz))
+
+ def test_zero_load_input_power(self):
+ """Test power demand when load is zero"""
+
+ zero_load_power_big = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \
+ & (IN:dynamic_power > 0)", reference=True, select=self.BIG)
+ self.assertEquals(len(zero_load_power_big), 0)
+
+ zero_load_power_little = self.analyzer.getStatement("((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \
+ & (IN:dynamic_power > 0)", reference=True, select=self.LITTLE)
+ self.assertEquals(len(zero_load_power_little), 0)
+
+ def test_sustainable_power(self):
+ """temp > control_temp, allocated_power < sustainable_power"""
+
+ self.analyzer.getStatement("(GOVERNOR:current_temperature > CONTROL_TEMP) &\
+ (PID:output > SUSTAINABLE_POWER)", reference=True, select=0)
diff --git a/docs/notebooks/sched/SchedDeadline.ipynb b/docs/notebooks/sched/SchedDeadline.ipynb
new file mode 100644
index 0000000..95cd0e2
--- /dev/null
+++ b/docs/notebooks/sched/SchedDeadline.ipynb
@@ -0,0 +1,648 @@
+{
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 2",
+ "language": "python",
+ "name": "python2"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 2
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython2",
+ "version": "2.7.9"
+ },
+ "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from trappy.stats.Topology import Topology\n",
+ "from bart.sched.SchedMultiAssert import SchedMultiAssert\n",
+ "from bart.sched.SchedAssert import SchedAssert\n",
+ "import trappy\n",
+ "import os\n",
+ "import operator\n",
+ "import json\n",
+ "\n",
+ "#Define a CPU Topology (for multi-cluster systems)\n",
+ "BIG = [1, 2]\n",
+ "LITTLE = [0, 3, 4, 5]\n",
+ "CLUSTERS = [BIG, LITTLE]\n",
+ "topology = Topology(clusters=CLUSTERS)\n",
+ "\n",
+ "BASE_PATH = \"/Users/kapileshwarsingh/AnalysisRawData/LPC/sched_deadline/\"\n",
+ "\n",
+ "THRESHOLD = 10.0\n",
+ "def between_threshold(a, b):\n",
+ " return abs(((a - b) * 100.0) / b) < THRESHOLD"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 3
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Periodic Yield"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "The thread periodic_yield is woken up at 30ms intervals where it calls sched_yield and relinquishes its time-slice.\n",
+ "The expectation is that the task will have a duty cycle < 1% and a period of 30ms.\n",
+ "\n",
+    "There are two threads, and the rank=1 conveys that the condition is true for one of the threads with the name \"periodic_yield\"\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "TRACE_FILE = os.path.join(BASE_PATH, \"yield\")\n",
+ "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n",
+ "\n",
+ "# Assert Period\n",
+ "s = SchedMultiAssert(ftrace, topology, execnames=\"periodic_yield\")\n",
+ "if s.assertPeriod(30, between_threshold, rank=1):\n",
+ " print \"PASS: Period\"\n",
+ " print json.dumps(s.getPeriod(), indent=3)\n",
+ "\n",
+ "print \"\"\n",
+ " \n",
+ "# Assert DutyCycle \n",
+ "if s.assertDutyCycle(1, operator.lt, window=(0,4), rank=2):\n",
+ " print \"PASS: DutyCycle\"\n",
+ " print json.dumps(s.getDutyCycle(window=(0,4)), indent=3)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "PASS: Period\n",
+ "{\n",
+ " \"1844\": {\n",
+ " \"period\": 1.0085000000401578, \n",
+ " \"task_name\": \"periodic_yield\"\n",
+ " }, \n",
+ " \"1845\": {\n",
+ " \"period\": 29.822017857142669, \n",
+ " \"task_name\": \"periodic_yield\"\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "PASS: DutyCycle\n",
+ "{\n",
+ " \"1844\": {\n",
+ " \"task_name\": \"periodic_yield\", \n",
+ " \"dutycycle\": 0.074749999998857675\n",
+ " }, \n",
+ " \"1845\": {\n",
+ " \"task_name\": \"periodic_yield\", \n",
+ " \"dutycycle\": 0.03862499999343072\n",
+ " }\n",
+ "}\n"
+ ]
+ }
+ ],
+ "prompt_number": 10
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "CPU Hog"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The reservation of a CPU hogging task is set to 10ms for every 100ms. The assertion ensures a duty cycle of 10%"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "TRACE_FILE = os.path.join(BASE_PATH, \"cpuhog\")\n",
+ "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n",
+ "s = SchedMultiAssert(ftrace, topology, execnames=\"cpuhog\")\n",
+ "s.plot().view()\n",
+ "\n",
+ "# Assert DutyCycle\n",
+ "if s.assertDutyCycle(10, between_threshold, window=(0, 5), rank=1):\n",
+ " print \"PASS: DutyCycle\"\n",
+ " print json.dumps(s.getDutyCycle(window=(0, 5)), indent=3)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "<style>\n",
+ "/*\n",
+ "\n",
+ " * Copyright 2015-2015 ARM Limited\n",
+ "\n",
+ " *\n",
+ "\n",
+ " * Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "\n",
+ " * you may not use this file except in compliance with the License.\n",
+ "\n",
+ " * You may obtain a copy of the License at\n",
+ "\n",
+ " *\n",
+ "\n",
+ " * http://www.apache.org/licenses/LICENSE-2.0\n",
+ "\n",
+ " *\n",
+ "\n",
+ " * Unless required by applicable law or agreed to in writing, software\n",
+ "\n",
+ " * distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "\n",
+ " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "\n",
+ " * See the License for the specific language governing permissions and\n",
+ "\n",
+ " * limitations under the License.\n",
+ "\n",
+ " */\n",
+ "\n",
+ "\n",
+ "\n",
+ ".d3-tip {\n",
+ "\n",
+ " line-height: 1;\n",
+ "\n",
+ " padding: 12px;\n",
+ "\n",
+ " background: rgba(0, 0, 0, 0.6);\n",
+ "\n",
+ " color: #fff;\n",
+ "\n",
+ " border-radius: 2px;\n",
+ "\n",
+ " position: absolute !important;\n",
+ "\n",
+ " z-index: 99999;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".d3-tip:after {\n",
+ "\n",
+ " box-sizing: border-box;\n",
+ "\n",
+ " pointer-events: none;\n",
+ "\n",
+ " display: inline;\n",
+ "\n",
+ " font-size: 10px;\n",
+ "\n",
+ " width: 100%;\n",
+ "\n",
+ " line-height: 1;\n",
+ "\n",
+ " color: rgba(0, 0, 0, 0.6);\n",
+ "\n",
+ " content: \"\\25BC\";\n",
+ "\n",
+ " position: absolute !important;\n",
+ "\n",
+ " z-index: 99999;\n",
+ "\n",
+ " text-align: center;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".d3-tip.n:after {\n",
+ "\n",
+ " margin: -1px 0 0 0;\n",
+ "\n",
+ " top: 100%;\n",
+ "\n",
+ " left: 0;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".chart {\n",
+ "\n",
+ " shape-rendering: crispEdges;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".mini text {\n",
+ "\n",
+ " font: 9px sans-serif;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".main text {\n",
+ "\n",
+ " font: 12px sans-serif;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".axis line, .axis path {\n",
+ "\n",
+ " stroke: black;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".miniItem {\n",
+ "\n",
+ " stroke-width: 8;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".brush .extent {\n",
+ "\n",
+ "\n",
+ "\n",
+ " stroke: #000;\n",
+ "\n",
+ " fill-opacity: .125;\n",
+ "\n",
+ " shape-rendering: crispEdges;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "</style>\n",
+ "<div id=\"fig_41c7653cedde4765ae1f166e75c4fb08\" class=\"eventplot\">\n",
+ " <script>\n",
+ " var req = require.config( {\n",
+ "\n",
+ " paths: {\n",
+ "\n",
+ " \"EventPlot\": \"https://rawgit.com/sinkap/7f89de3e558856b81f10/raw/46144f8f8c5da670c54f826f0c634762107afc66/EventPlot\",\n",
+ " \"d3-tip\": \"http://labratrevenge.com/d3-tip/javascripts/d3.tip.v0.6.3\",\n",
+ " \"d3\": \"http://d3js.org/d3.v3.min\"\n",
+ " },\n",
+ " shim: {\n",
+ " \"d3-tip\": [\"d3\"],\n",
+ " \"EventPlot\": {\n",
+ "\n",
+ " \"deps\": [\"d3-tip\", \"d3\" ],\n",
+ " \"exports\": \"EventPlot\"\n",
+ " }\n",
+ " }\n",
+ " });\n",
+ " req([\"require\", \"EventPlot\"], function() {\n",
+ " EventPlot.generate('fig_41c7653cedde4765ae1f166e75c4fb08', 'https://rawgit.com/sinkap/e9bc2394cf322f4dad0d/raw/014fae226c847a467fba541fbc390e18acea127b/fig_41c7653cedde4765ae1f166e75c4fb08.json');\n",
+ " });\n",
+ " </script>\n",
+ " </div>"
+ ],
+ "metadata": {},
+ "output_type": "display_data",
+ "text": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "PASS: DutyCycle\n",
+ "{\n",
+ " \"1852\": {\n",
+ " \"task_name\": \"cpuhog\", \n",
+ " \"dutycycle\": 10.050119999991693\n",
+ " }\n",
+ "}\n"
+ ]
+ }
+ ],
+ "prompt_number": 11
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Changing Reservations"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A CPU hogging task has reservations set in the increasing order starting from 10% followed by a 2s period of normal execution"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "TRACE_FILE = os.path.join(BASE_PATH, \"cancel_dl_timer\")\n",
+ "ftrace = trappy.FTrace(TRACE_FILE, \"cpuhog\")\n",
+ "s = SchedAssert(ftrace, topology, execname=\"cpuhog\")\n",
+ "s.plot().view()\n",
+ "\n",
+ "NUM_PHASES = 10\n",
+ "PHASE_DURATION = 2\n",
+ "start = s.getStartTime()\n",
+ "DUTY_CYCLE_FACTOR = 10\n",
+ "\n",
+ "\n",
+ "for phase in range(NUM_PHASES + 1):\n",
+ " window = (start + (phase * PHASE_DURATION),\n",
+ " start + ((phase + 1) * PHASE_DURATION))\n",
+ " \n",
+ " if phase % 2 == 0:\n",
+ " DUTY_CYCLE = (phase + 2) * DUTY_CYCLE_FACTOR / 2\n",
+ " else:\n",
+ " DUTY_CYCLE = 100\n",
+ "\n",
+ "\n",
+ " print \"WINDOW -> [{:.2f}, {:.2f}]\".format(window[0],\n",
+ " window[1])\n",
+ " \n",
+ " \n",
+ " \n",
+ " if s.assertDutyCycle(DUTY_CYCLE, between_threshold, window=window):\n",
+ " print \"PASS: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n",
+ " s.getDutyCycle(window=window),\n",
+ " THRESHOLD)\n",
+ " else:\n",
+ " print \"FAIL: Expected={} Actual={:.2f} THRESHOLD={}\".format(DUTY_CYCLE,\n",
+ " s.getDutyCycle(window=window),\n",
+ " THRESHOLD)\n",
+ " \n",
+ " print \"\""
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "html": [
+ "<style>\n",
+ "/*\n",
+ "\n",
+ " * Copyright 2015-2015 ARM Limited\n",
+ "\n",
+ " *\n",
+ "\n",
+ " * Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "\n",
+ " * you may not use this file except in compliance with the License.\n",
+ "\n",
+ " * You may obtain a copy of the License at\n",
+ "\n",
+ " *\n",
+ "\n",
+ " * http://www.apache.org/licenses/LICENSE-2.0\n",
+ "\n",
+ " *\n",
+ "\n",
+ " * Unless required by applicable law or agreed to in writing, software\n",
+ "\n",
+ " * distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "\n",
+ " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "\n",
+ " * See the License for the specific language governing permissions and\n",
+ "\n",
+ " * limitations under the License.\n",
+ "\n",
+ " */\n",
+ "\n",
+ "\n",
+ "\n",
+ ".d3-tip {\n",
+ "\n",
+ " line-height: 1;\n",
+ "\n",
+ " padding: 12px;\n",
+ "\n",
+ " background: rgba(0, 0, 0, 0.6);\n",
+ "\n",
+ " color: #fff;\n",
+ "\n",
+ " border-radius: 2px;\n",
+ "\n",
+ " position: absolute !important;\n",
+ "\n",
+ " z-index: 99999;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".d3-tip:after {\n",
+ "\n",
+ " box-sizing: border-box;\n",
+ "\n",
+ " pointer-events: none;\n",
+ "\n",
+ " display: inline;\n",
+ "\n",
+ " font-size: 10px;\n",
+ "\n",
+ " width: 100%;\n",
+ "\n",
+ " line-height: 1;\n",
+ "\n",
+ " color: rgba(0, 0, 0, 0.6);\n",
+ "\n",
+ " content: \"\\25BC\";\n",
+ "\n",
+ " position: absolute !important;\n",
+ "\n",
+ " z-index: 99999;\n",
+ "\n",
+ " text-align: center;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".d3-tip.n:after {\n",
+ "\n",
+ " margin: -1px 0 0 0;\n",
+ "\n",
+ " top: 100%;\n",
+ "\n",
+ " left: 0;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".chart {\n",
+ "\n",
+ " shape-rendering: crispEdges;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".mini text {\n",
+ "\n",
+ " font: 9px sans-serif;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".main text {\n",
+ "\n",
+ " font: 12px sans-serif;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".axis line, .axis path {\n",
+ "\n",
+ " stroke: black;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".miniItem {\n",
+ "\n",
+ " stroke-width: 8;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "\n",
+ "\n",
+ ".brush .extent {\n",
+ "\n",
+ "\n",
+ "\n",
+ " stroke: #000;\n",
+ "\n",
+ " fill-opacity: .125;\n",
+ "\n",
+ " shape-rendering: crispEdges;\n",
+ "\n",
+ "}\n",
+ "\n",
+ "</style>\n",
+ "<div id=\"fig_421afa8cc8234df49030c900b680220b\" class=\"eventplot\">\n",
+ " <script>\n",
+ " var req = require.config( {\n",
+ "\n",
+ " paths: {\n",
+ "\n",
+ " \"EventPlot\": \"https://rawgit.com/sinkap/7f89de3e558856b81f10/raw/46144f8f8c5da670c54f826f0c634762107afc66/EventPlot\",\n",
+ " \"d3-tip\": \"http://labratrevenge.com/d3-tip/javascripts/d3.tip.v0.6.3\",\n",
+ " \"d3\": \"http://d3js.org/d3.v3.min\"\n",
+ " },\n",
+ " shim: {\n",
+ " \"d3-tip\": [\"d3\"],\n",
+ " \"EventPlot\": {\n",
+ "\n",
+ " \"deps\": [\"d3-tip\", \"d3\" ],\n",
+ " \"exports\": \"EventPlot\"\n",
+ " }\n",
+ " }\n",
+ " });\n",
+ " req([\"require\", \"EventPlot\"], function() {\n",
+ " EventPlot.generate('fig_421afa8cc8234df49030c900b680220b', 'https://rawgit.com/sinkap/a207675f6483aa0b9342/raw/825717935112f36fe996b77093c0c71d3871fee4/fig_421afa8cc8234df49030c900b680220b.json');\n",
+ " });\n",
+ " </script>\n",
+ " </div>"
+ ],
+ "metadata": {},
+ "output_type": "display_data",
+ "text": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "WINDOW -> [0.00, 2.00]\n",
+ "PASS: Expected=10 Actual=10.38 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [2.00, 4.00]\n",
+ "PASS: Expected=100 Actual=99.60 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [4.00, 6.00]\n",
+ "PASS: Expected=20 Actual=21.06 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [6.00, 8.00]\n",
+ "PASS: Expected=100 Actual=95.69 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [8.00, 10.00]\n",
+ "PASS: Expected=30 Actual=31.78 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [10.00, 12.00]\n",
+ "PASS: Expected=100 Actual=98.23 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [12.00, 14.00]\n",
+ "PASS: Expected=40 Actual=40.74 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [14.00, 16.00]\n",
+ "PASS: Expected=100 Actual=97.58 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [16.00, 18.00]\n",
+ "PASS: Expected=50 Actual=52.51 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [18.00, 20.00]\n",
+ "PASS: Expected=100 Actual=96.38 THRESHOLD=10.0\n",
+ "\n",
+ "WINDOW -> [20.00, 22.00]\n",
+ "PASS: Expected=60 Actual=60.71 THRESHOLD=10.0\n",
+ "\n"
+ ]
+ }
+ ],
+ "prompt_number": 4
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+} \ No newline at end of file
diff --git a/docs/notebooks/thermal/Thermal.ipynb b/docs/notebooks/thermal/Thermal.ipynb
new file mode 100644
index 0000000..bf51929
--- /dev/null
+++ b/docs/notebooks/thermal/Thermal.ipynb
@@ -0,0 +1,393 @@
+{
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 2",
+ "language": "python",
+ "name": "python2"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 2
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython2",
+ "version": "2.7.6"
+ },
+ "name": "",
+ "signature": "sha256:59ef0b9fe2847e77f9df55deeb6df1f94f4fe2a3a0f99e13cba99854e8bf66ed"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Configuration"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import trappy\n",
+ "import numpy\n",
+ "\n",
+ "config = {}\n",
+ "\n",
+ "# TRAPpy Events\n",
+ "config[\"THERMAL\"] = trappy.thermal.Thermal\n",
+ "config[\"OUT\"] = trappy.cpu_power.CpuOutPower\n",
+ "config[\"IN\"] = trappy.cpu_power.CpuInPower\n",
+ "config[\"PID\"] = trappy.pid_controller.PIDController\n",
+ "config[\"GOVERNOR\"] = trappy.thermal.ThermalGovernor\n",
+ "\n",
+ "# Control Temperature\n",
+ "config[\"CONTROL_TEMP\"] = 77000\n",
+ "\n",
+ "# A temperature margin of 2.5 degrees Celsius\n",
+ "config[\"TEMP_MARGIN\"] = 2500\n",
+ "\n",
+ "# The Sustainable power at the control Temperature\n",
+ "config[\"SUSTAINABLE_POWER\"] = 2500\n",
+ "\n",
+ "# Expected percentile of CONTROL_TEMP + TEMP_MARGIN\n",
+ "config[\"EXPECTED_TEMP_QRT\"] = 95\n",
+ "\n",
+ "# Maximum expected Standard Deviation as a percentage\n",
+ "# of mean temperature\n",
+ "config[\"EXPECTED_STD_PCT\"] = 5\n"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 1
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Get the Trace"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "import urllib\n",
+ "import os\n",
+ "\n",
+ "TRACE_DIR = \"example_trace_dat_thermal\"\n",
+ "TRACE_FILE = os.path.join(TRACE_DIR, 'bart_thermal_trace.dat')\n",
+ "TRACE_URL = 'http://cdn.rawgit.com/sinkap/4e0a69cbff732b57e36f/raw/7dd0ed74bfc17a34a3bd5ea6b9eb3a75a42ddbae/bart_thermal_trace.dat'\n",
+ "\n",
+ "if not os.path.isdir(TRACE_DIR):\n",
+ " os.mkdir(TRACE_DIR)\n",
+ "\n",
+ "if not os.path.isfile(TRACE_FILE):\n",
+ " print \"Fetching trace file..\"\n",
+ " urllib.urlretrieve(TRACE_URL, filename=TRACE_FILE)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 2
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "FTrace Object"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# Create a Trace object\n",
+ "\n",
+ "ftrace = trappy.FTrace(TRACE_FILE, \"SomeBenchMark\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 3
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Assertions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "# Create an Assertion Object\n",
+ "\n",
+ "from bart.common.Analyzer import Analyzer\n",
+ "t = Analyzer(ftrace, config)\n",
+ "\n",
+ "BIG = '000000f0'\n",
+ "LITTLE = '0000000f'"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 4
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Assertion: Load and Dynamic Power"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<html>\n",
+    "This assertion makes sure that the dynamic power for each cluster is zero when the sum of the \"loads\" of each CPU is 0\n",
+ "\n",
+ " $$\\forall\\ t\\ |\\ Load(t) = \\sum\\limits_{i=0}^{cpus} Load_i(t) = 0 \\implies dynamic\\ power(t)=0 $$\n",
+ " \n",
+ "</html>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n",
+ " & (IN:dynamic_power > 0)\",reference=True, select=BIG)\n",
+ "if len(result):\n",
+ " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the BIG cluster\"\n",
+ "else:\n",
+ " print \"PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\"\n",
+ "\n",
+ " \n",
+ "result = t.getStatement(\"((IN:load0 + IN:load1 + IN:load2 + IN:load3) == 0) \\\n",
+ " & (IN:dynamic_power > 0)\",reference=True, select=LITTLE)\n",
+ "if len(result):\n",
+ " print \"FAIL: Dynamic Power is NOT Zero when load is Zero for the LITTLE cluster\"\n",
+ "else:\n",
+ " print \"PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\""
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "PASS: Dynamic Power is Zero when load is Zero for the BIG cluster\n",
+ "PASS: Dynamic Power is Zero when load is Zero for the LITTLE cluster\n"
+ ]
+ }
+ ],
+ "prompt_number": 5
+ },
+ {
+ "cell_type": "heading",
+ "level": 2,
+ "metadata": {},
+ "source": [
+ "Assertion: Control Temperature and Sustainable Power"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "<html>\n",
+ "\n",
+ "When the temperature is greater than the control temperature, the total power granted to all cooling devices should be less than sustainable_power\n",
+ "\n",
+ "$$\\forall\\ t\\ |\\ Temperature(t) > control\\_temp \\implies Total\\ Granted\\ Power(t) < sustainable\\_power$$\n",
+ "\n",
+    "</html>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "result = t.getStatement(\"(GOVERNOR:current_temperature > CONTROL_TEMP) &\\\n",
+ " (PID:output > SUSTAINABLE_POWER)\", reference=True, select=0)\n",
+ "\n",
+ "if len(result):\n",
+ " print \"FAIL: The Governor is allocating power > sustainable when T > CONTROL_TEMP\"\n",
+ "else:\n",
+ " print \"PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\" "
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "PASS: The Governor is allocating power <= sustainable when T > CONTROL_TEMP\n"
+ ]
+ }
+ ],
+ "prompt_number": 6
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Statistics"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Check if 95% of the temperature readings are below CONTROL_TEMP + MARGIN"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "t.assertStatement(\"numpy.percentile(THERMAL:temp, 95) < (CONTROL_TEMP + TEMP_MARGIN)\")"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 7,
+ "text": [
+ "True"
+ ]
+ }
+ ],
+ "prompt_number": 7
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Check if the mean temperature is less than CONTROL_TEMP"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "t.assertStatement(\"numpy.mean(THERMAL:temp) <= CONTROL_TEMP\", select=0)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 8,
+ "text": [
+ "True"
+ ]
+ }
+ ],
+ "prompt_number": 8
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can also use getStatement to get the absolute values. Here we are getting the standard deviation expressed as a percentage of the mean"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "t.getStatement(\"(numpy.std(THERMAL:temp) * 100.0) / numpy.mean(THERMAL:temp)\", select=0)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 9,
+ "text": [
+ "2.2390646863105119"
+ ]
+ }
+ ],
+ "prompt_number": 9
+ },
+ {
+ "cell_type": "heading",
+ "level": 1,
+ "metadata": {},
+ "source": [
+ "Thermal Residency"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from bart.thermal.ThermalAssert import ThermalAssert\n",
+ "\n",
+ "t_assert = ThermalAssert(ftrace)\n",
+ "end = ftrace.get_duration()\n",
+ "\n",
+ "LOW = 0\n",
+ "HIGH = 78000\n",
+ "\n",
+ "# The thermal residency gives the percentage (or absolute time) spent in the\n",
+ "# specified temperature range. \n",
+ "\n",
+ "result = t_assert.getThermalResidency(temp_range=(0, 78000),\n",
+ " window=(0, end),\n",
+ " percent=True)\n",
+ "\n",
+ "for tz_id in result:\n",
+ " print \"Thermal Zone: {} spends {:.2f}% time in the temperature range [{}, {}]\".format(tz_id, \n",
+ " result[tz_id],\n",
+ " LOW/1000,\n",
+ " HIGH/1000)\n",
+ " pct_temp = numpy.percentile(t.getStatement(\"THERMAL:temp\")[tz_id], result[tz_id])\n",
+ " \n",
+ " print \"The {:.2f}th percentile temperature is {:.2f}\".format(result[tz_id], pct_temp / 1000.0)\n",
+ " "
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "output_type": "stream",
+ "stream": "stdout",
+ "text": [
+ "Thermal Zone: 0 spends 86.58% time in the temperature range [0, 78]\n",
+ "The 86.58th percentile temperature is 78.28\n"
+ ]
+ }
+ ],
+ "prompt_number": 10
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+} \ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..c4b13e3
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[upload_sphinx]
+upload-dir = docs/api_reference/_build/html
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..c73c225
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from setuptools import setup, find_packages
+
+
+execfile("bart/version.py")
+
+LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general
+expectation of the state of the system while targeting a single or set of heuristics.
+This is particularly helpful when there are large number of factors that can change
+the behaviour of the system and testing all permutations of these input parameters
+is impossible. In such a scenario an assertion of the final expectation can be
+useful in managing performance and regression.
+
+The Behavioural Analysis and Regression Toolkit is based on TRAPpy. The primary goal is
+to assert behaviours using the FTrace output from the kernel
+"""
+
+REQUIRES = [
+ "TRAPpy>=3.0",
+]
+
+setup(name='bart-py',
+ version=__version__,
+ license="Apache v2",
+ author="ARM-BART",
+ author_email="bart@arm.com",
+ description="Behavioural Analysis and Regression Toolkit",
+ long_description=LONG_DESCRIPTION,
+ url="http://arm-software.github.io/bart",
+ packages=find_packages(),
+ include_package_data=True,
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Environment :: Web Environment",
+ "Environment :: Console",
+ "License :: OSI Approved :: Apache Software License",
+ "Operating System :: POSIX :: Linux",
+ "Programming Language :: Python :: 2.7",
+ # As we depend on trace data from the Linux Kernel/FTrace
+ "Topic :: System :: Operating System Kernels :: Linux",
+ "Topic :: Scientific/Engineering :: Visualization"
+ ],
+ install_requires=REQUIRES
+ )
diff --git a/tests/raw_trace.dat b/tests/raw_trace.dat
new file mode 100644
index 0000000..adfb449
--- /dev/null
+++ b/tests/raw_trace.dat
Binary files differ
diff --git a/tests/test_common_utils.py b/tests/test_common_utils.py
new file mode 100644
index 0000000..09b31e3
--- /dev/null
+++ b/tests/test_common_utils.py
@@ -0,0 +1,130 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from bart.common import Utils
+from bart.common.Analyzer import Analyzer
+import unittest
+import pandas as pd
+import trappy
+
+
+class TestCommonUtils(unittest.TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(TestCommonUtils, self).__init__(*args, **kwargs)
+
+ def test_interval_sum(self):
+ """Test Utils Function: interval_sum"""
+
+ # A series with a non uniform index
+        # Refer to the example illustrations in the
+        # interval_sum doc-strings, which explain
+        # the difference between step-post and step-pre
+        # calculations
+ values = [0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1]
+ index = [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12]
+ series = pd.Series(values, index=index)
+
+ self.assertEqual(Utils.interval_sum(series, 1, step="post"), 8)
+ self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 7)
+
+ # check left boundary
+ array = [1, 1, 0, 0]
+ series = pd.Series(array)
+
+ self.assertEqual(Utils.interval_sum(series, 1, step="post"), 2)
+ self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 1)
+
+ # check right boundary
+ array = [0, 0, 1, 1]
+ series = pd.Series(array)
+
+ self.assertEqual(Utils.interval_sum(series, 1, step="post"), 1)
+ self.assertEqual(Utils.interval_sum(series, 1, step="pre"), 2)
+
+ array = [False, False, True, True, True, True, False, False]
+ series = pd.Series(array)
+ self.assertEqual(Utils.interval_sum(series), 4)
+
+ def test_area_under_curve(self):
+ """Test Utils function: area_under_curve"""
+
+ array = [0, 0, 2, 2, 2, 1, 1, 1]
+ series = pd.Series(array)
+
+ # Area under curve post stepping
+ self.assertEqual(
+ Utils.area_under_curve(
+ series,
+ method="rect",
+ step="post"),
+ 8)
+
+ # Area under curve pre stepping
+ self.assertEqual(
+ Utils.area_under_curve(
+ series,
+ method="rect",
+ step="pre"),
+ 9)
+
+ array = [1]
+ series = pd.Series(array)
+
+ # Area under curve post stepping, edge case
+ self.assertEqual(
+ Utils.area_under_curve(
+ series,
+ method="rect",
+ step="post"),
+ 0)
+
+ # Area under curve pre stepping, edge case
+ self.assertEqual(
+ Utils.area_under_curve(
+ series,
+ method="rect",
+ step="pre"),
+ 0)
+
+
+class TestAnalyzer(unittest.TestCase):
+
+ def test_assert_statement_bool(self):
+        """Check that assertStatement() works with a simple boolean case"""
+
+ rolls_dfr = pd.DataFrame({"results": [1, 3, 2, 6, 2, 4]})
+ trace = trappy.BareTrace()
+ trace.add_parsed_event("dice_rolls", rolls_dfr)
+ config = {"MAX_DICE_NUMBER": 6}
+
+ t = Analyzer(trace, config)
+ statement = "numpy.max(dice_rolls:results) <= MAX_DICE_NUMBER"
+ self.assertTrue(t.assertStatement(statement, select=0))
+
+ def test_assert_statement_dataframe(self):
+ """assertStatement() works if the generated statement creates a pandas.DataFrame of bools"""
+
+ rolls_dfr = pd.DataFrame({"results": [1, 3, 2, 6, 2, 4]})
+ trace = trappy.BareTrace()
+ trace.add_parsed_event("dice_rolls", rolls_dfr)
+ config = {"MIN_DICE_NUMBER": 1, "MAX_DICE_NUMBER": 6}
+ t = Analyzer(trace, config)
+
+ statement = "(dice_rolls:results <= MAX_DICE_NUMBER) & (dice_rolls:results >= MIN_DICE_NUMBER)"
+ self.assertTrue(t.assertStatement(statement))
+
+ statement = "dice_rolls:results == 3"
+ self.assertFalse(t.assertStatement(statement))
diff --git a/tests/test_sched_assert.py b/tests/test_sched_assert.py
new file mode 100644
index 0000000..4f8c28b
--- /dev/null
+++ b/tests/test_sched_assert.py
@@ -0,0 +1,116 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from bart.sched.SchedAssert import SchedAssert
+from bart.sched.SchedMultiAssert import SchedMultiAssert
+import trappy
+from trappy.stats.Topology import Topology
+import unittest
+
+import utils_tests
+
+
+@unittest.skipUnless(utils_tests.trace_cmd_installed(),
+ "trace-cmd not installed")
+class TestSchedAssert(utils_tests.SetupDirectory):
+
+ def __init__(self, *args, **kwargs):
+
+ self.BIG = [1,2]
+ self.LITTLE = [0, 3, 4, 5]
+ self.clusters = [self.BIG, self.LITTLE]
+ self.topology = Topology(clusters=self.clusters)
+ super(TestSchedAssert, self).__init__(
+ [("raw_trace.dat", "trace.dat")],
+ *args,
+ **kwargs)
+
+ def test_get_runtime(self):
+
+ r = trappy.FTrace()
+        # The ls process is the process we are
+        # testing against, with pre-calculated
+        # values
+ process = "ls"
+
+ # Complete duration
+ expected_time = 0.0034740000264719129
+ s = SchedAssert(r, self.topology, execname=process)
+ self.assertAlmostEqual(s.getRuntime(), expected_time, places=9)
+ self.assertAlmostEqual(s.getRuntime(), expected_time, places=9)
+
+ # Non Interrupted Window
+ window = (0.0034, 0.003525)
+ expected_time = 0.000125
+ self.assertAlmostEqual(s.getRuntime(window=window), expected_time,
+ places=9)
+
+ # Interrupted Window
+ window = (0.0030, 0.0032)
+ expected_time = 0.000166
+ self.assertAlmostEqual(s.getRuntime(window=window), expected_time,
+ places=9)
+
+ # A window with multiple interruptions
+ window = (0.0027, 0.0036)
+ expected_time = 0.000817
+ self.assertAlmostEqual(s.getRuntime(window=window), expected_time,
+ places=9)
+
+ def test_get_last_cpu(self):
+ """SchedAssert.getLastCpu() gives you the last cpu in which a task ran"""
+ expected_last_cpu = 5
+
+ sa = SchedAssert("trace.dat", self.topology, execname="ls")
+ self.assertEqual(sa.getLastCpu(), expected_last_cpu)
+
+class TestSchedMultiAssert(utils_tests.SetupDirectory):
+ def __init__(self, *args, **kwargs):
+ self.big = [1,2]
+ self.little = [0, 3, 4, 5]
+ self.clusters = [self.big, self.little]
+ self.all_cpus = sorted(self.big + self.little)
+ self.topology = Topology(clusters=self.clusters)
+ super(TestSchedMultiAssert, self).__init__(
+ [("raw_trace.dat", "trace.dat")],
+ *args,
+ **kwargs)
+
+ def test_cpu_busy_time(self):
+        """SchedMultiAssert.getCPUBusyTime() works"""
+
+ # precalculated values against these processes in the trace
+ pids = [4729, 4734]
+ first_time = .000214
+ last_time = .003171
+
+ tr = trappy.FTrace()
+ sma = SchedMultiAssert(tr, self.topology, pids=pids)
+
+ expected_busy_time = 0.0041839999754810708
+ busy_time = sma.getCPUBusyTime("all", self.all_cpus, window=(first_time, last_time))
+ self.assertAlmostEqual(busy_time, expected_busy_time)
+
+ # percent calculation
+ expected_busy_pct = 23.582459561949445
+ busy_pct= sma.getCPUBusyTime("all", self.all_cpus, percent=True,
+ window=(first_time, last_time))
+ self.assertAlmostEqual(busy_pct, expected_busy_pct)
+
+ # percent without a window
+ expected_busy_pct = 23.018818156540004
+ busy_pct= sma.getCPUBusyTime("cluster", self.little, percent=True)
+ self.assertAlmostEqual(busy_pct, expected_busy_pct)
diff --git a/tests/test_sched_functions.py b/tests/test_sched_functions.py
new file mode 100644
index 0000000..1a8d4ac
--- /dev/null
+++ b/tests/test_sched_functions.py
@@ -0,0 +1,69 @@
+# Copyright 2016-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import trappy
+
+import utils_tests
+
+class TestSchedFunctions(utils_tests.SetupDirectory):
+ def __init__(self, *args, **kwargs):
+ super(TestSchedFunctions, self).__init__([], *args, **kwargs)
+
+ def test_get_pids_for_processes_no_sched_switch(self):
+ """get_pids_for_processes() raises an exception if the trace doesn't have a sched_switch event"""
+ from bart.sched.functions import get_pids_for_process
+
+ trace_file = "trace.txt"
+ raw_trace_file = "trace.raw.txt"
+
+ with open(trace_file, "w") as fout:
+ fout.write("")
+
+ with open(raw_trace_file, "w") as fout:
+ fout.write("")
+
+ trace = trappy.FTrace(trace_file)
+ with self.assertRaises(ValueError):
+ get_pids_for_process(trace, "foo")
+
+ def test_get_pids_for_process_funny_process_names(self):
+ """get_pids_for_process() works when a process name is a substring of another"""
+ from bart.sched.functions import get_pids_for_process
+
+ trace_file = "trace.txt"
+ raw_trace_file = "trace.raw.txt"
+ in_data = """ <idle>-0 [001] 10826.894644: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=0 next_comm=rt-app next_pid=3268 next_prio=120
+ wmig-3268 [001] 10826.894778: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=rt-app next_pid=3269 next_prio=120
+ wmig1-3269 [001] 10826.905152: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=1 next_comm=wmig next_pid=3268 next_prio=120
+ wmig-3268 [001] 10826.915384: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=swapper/1 next_pid=0 next_prio=120
+ <idle>-0 [005] 10826.995169: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=0 next_comm=wmig1 next_pid=3269 next_prio=120
+ wmig1-3269 [005] 10827.007064: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=0 next_comm=wmig next_pid=3268 next_prio=120
+ wmig-3268 [005] 10827.019061: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=0 next_comm=wmig1 next_pid=3269 next_prio=120
+ wmig1-3269 [005] 10827.031061: sched_switch: prev_comm=wmig1 prev_pid=3269 prev_prio=120 prev_state=0 next_comm=wmig next_pid=3268 next_prio=120
+ wmig-3268 [005] 10827.050645: sched_switch: prev_comm=wmig prev_pid=3268 prev_prio=120 prev_state=1 next_comm=swapper/5 next_pid=0 next_prio=120
+"""
+
+ # We create an empty trace.txt to please trappy ...
+ with open(trace_file, "w") as fout:
+ fout.write("")
+
+ # ... but we only put the sched_switch events in the raw trace
+ # file because that's where trappy is going to look for
+ with open(raw_trace_file, "w") as fout:
+ fout.write(in_data)
+
+ trace = trappy.FTrace(trace_file)
+
+ self.assertEquals(get_pids_for_process(trace, "wmig"), [3268])
diff --git a/tests/test_signal.py b/tests/test_signal.py
new file mode 100644
index 0000000..48692a9
--- /dev/null
+++ b/tests/test_signal.py
@@ -0,0 +1,104 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pandas as pd
+import trappy
+from utils_tests import TestBART
+from bart.common.signal import SignalCompare
+import numpy as np
+
+
+class TestSignalCompare(TestBART):
+
+ def __init__(self, *args, **kwargs):
+ super(TestSignalCompare, self).__init__(*args, **kwargs)
+
+ def test_conditional_compare(self):
+ """Test conditional_compare"""
+
+ # Refer to the example in
+ # bart.common.signal.SignalCompare.conditional_compare
+ # doc-strings which explains the calculation for the
+ # data set below
+ A = [0, 0, 0, 3, 3, 0, 0, 0]
+ B = [0, 0, 2, 2, 2, 2, 1, 1]
+
+ trace = trappy.BareTrace()
+ df = pd.DataFrame({"A": A, "B": B})
+ trace.add_parsed_event("event", df)
+
+ s = SignalCompare(trace, "event:A", "event:B")
+ expected = (1.5, 2.0 / 7)
+ self.assertEqual(
+ s.conditional_compare(
+ "event:A > event:B",
+ method="rect"),
+ expected)
+
+ def test_get_overshoot(self):
+ """Test get_overshoot"""
+
+ A = [0, 0, 0, 3, 3, 0, 0, 0]
+ B = [0, 0, 2, 2, 2, 2, 1, 1]
+
+ trace = trappy.BareTrace()
+ df = pd.DataFrame({"A": A, "B": B})
+ trace.add_parsed_event("event", df)
+
+ s = SignalCompare(trace, "event:A", "event:B")
+ expected = (1.5, 2.0 / 7)
+ self.assertEqual(
+ s.get_overshoot(method="rect"),
+ expected)
+
+ A = [0, 0, 0, 1, 1, 0, 0, 0]
+ B = [0, 0, 2, 2, 2, 2, 1, 1]
+
+ df = pd.DataFrame({"A": A, "B": B})
+ trace.event.data_frame = df
+ s = SignalCompare(trace, "event:A", "event:B")
+
+ expected = (float("nan"), 0.0)
+ result = s.get_overshoot(method="rect")
+ self.assertTrue(np.isnan(result[0]))
+ self.assertEqual(result[1], expected[1])
+
+ def test_get_undershoot(self):
+ """Test get_undershoot"""
+
+ A = [0, 0, 0, 1, 1, 1, 1, 1]
+ B = [2, 2, 2, 2, 2, 2, 2, 2]
+
+ trace = trappy.BareTrace()
+ df = pd.DataFrame({"A": A, "B": B})
+ trace.add_parsed_event("event", df)
+
+ s = SignalCompare(trace, "event:A", "event:B")
+ expected = (4.0 / 14.0, 1.0)
+ self.assertEqual(
+ s.get_undershoot(method="rect"),
+ expected)
+
+ A = [3, 3, 3, 3, 3, 3, 3, 3]
+ B = [2, 2, 2, 2, 2, 2, 1, 1]
+
+ df = pd.DataFrame({"A": A, "B": B})
+ trace.event.data_frame = df
+ s = SignalCompare(trace, "event:A", "event:B")
+
+ expected = (float("nan"), 0.0)
+ result = s.get_undershoot(method="rect")
+ self.assertTrue(np.isnan(result[0]))
+ self.assertEqual(result[1], expected[1])
diff --git a/tests/trace.raw.txt b/tests/trace.raw.txt
new file mode 100644
index 0000000..f66d55b
--- /dev/null
+++ b/tests/trace.raw.txt
@@ -0,0 +1,7 @@
+version = 6
+CPU 3 is empty
+CPU 4 is empty
+cpus=6
+ ls-4734 [002] 106439.675591: sched_switch: prev_comm=trace-cmd prev_pid=4734 prev_prio=120 prev_state=1024 next_comm=migration/2 next_pid=18 next_prio=0
+ migration/2-18 [002] 106439.675613: sched_switch: prev_comm=migration/2 prev_pid=18 prev_prio=0 prev_state=1 next_comm=trace-cmd next_pid=4732 next_prio=120
+ trace-cmd-4730 [001] 106439.675718: sched_switch: prev_comm=trace-cmd prev_pid=4730 prev_prio=120 prev_state=1 next_comm=trace-cmd next_pid=4729 next_prio=120
diff --git a/tests/trace.txt b/tests/trace.txt
new file mode 100644
index 0000000..4fbf4c9
--- /dev/null
+++ b/tests/trace.txt
@@ -0,0 +1,7 @@
+version = 6
+CPU 3 is empty
+CPU 4 is empty
+cpus=6
+ ls-4734 [002] 106439.675591: sched_switch: trace-cmd:4734 [120] R ==> migration/2:18 [0]
+ migration/2-18 [002] 106439.675613: sched_switch: migration/2:18 [0] S ==> trace-cmd:4732 [120]
+ trace-cmd-4731 [001] 106439.675698: sched_switch: trace-cmd:4731 [120] S ==> trace-cmd:4730 [120]
diff --git a/tests/utils_tests.py b/tests/utils_tests.py
new file mode 100644
index 0000000..6dadca1
--- /dev/null
+++ b/tests/utils_tests.py
@@ -0,0 +1,66 @@
+# Copyright 2015-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import unittest
+import os
+import shutil
+import subprocess
+import tempfile
+
+TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
+
+
def trace_cmd_installed():
    """Return True if trace-cmd is installed, False otherwise.

    Probes by executing ``trace-cmd options`` and discarding its output.
    Only an OSError (e.g. the binary is missing from PATH) is treated as
    "not installed".
    """
    # Open devnull for *writing*: it is passed to the child as stdout.
    # The previous read-only handle made the child's writes to stdout
    # fail (EBADF), so the probe could raise instead of returning True.
    with open(os.devnull, "w") as devnull:
        try:
            subprocess.check_call(["trace-cmd", "options"], stdout=devnull)
        except OSError:
            return False

    return True
+
class SetupDirectory(unittest.TestCase):
    """TestCase base that runs each test inside a fresh temporary directory.

    ``files_to_copy`` is a list of ``(source, destination)`` pairs; each
    source (relative to the tests directory) is copied into the temporary
    directory before every test, and the whole directory is deleted again
    afterwards.
    """

    def __init__(self, files_to_copy, *args, **kwargs):
        self.files_to_copy = files_to_copy
        super(SetupDirectory, self).__init__(*args, **kwargs)

    def setUp(self):
        # Remember the current directory so tearDown() can restore it.
        self.previous_dir = os.getcwd()

        self.out_dir = tempfile.mkdtemp()
        os.chdir(self.out_dir)

        for source, destination in self.files_to_copy:
            shutil.copy(os.path.join(TESTS_DIRECTORY, source),
                        os.path.join(self.out_dir, destination))

    def tearDown(self):
        os.chdir(self.previous_dir)
        shutil.rmtree(self.out_dir)
+
+
class TestBART(SetupDirectory):
    """SetupDirectory specialisation that stages the canned trace dumps
    (trace.txt and trace.raw.txt) into the per-test temporary directory."""

    def __init__(self, *args, **kwargs):
        traces = [
            ("./trace.txt", "trace.txt"),
            ("./trace.raw.txt", "trace.raw.txt"),
        ]
        super(TestBART, self).__init__(traces, *args, **kwargs)