author     Andy Doan <doanac@gmail.com>    2012-01-29 23:35:01 -0600
committer  Andy Doan <doanac@gmail.com>    2012-01-31 18:08:37 -0600
commit     8269cafdc57e366ec55b4b2fdb6eb275eb65ee9b (patch)
tree       7571e1a17a0e6af113cb5df0cbaeab2d37cc5214
parent     79da738a8d993ce1f9995a150b6324e4dcadb636 (diff)
download   android_benchmark_views-8269cafdc57e366ec55b4b2fdb6eb275eb65ee9b.tar.gz
move report code off CombinedResults
This replaces the original code, which manipulated the JSON representation of the report, with an approach that collects the data through queries on the BenchmarkRun objects and then assembles it in JavaScript.
-rw-r--r--  android_benchmark_views_app/benchmark_run.py                                    175
-rw-r--r--  android_benchmark_views_app/templates/android_benchmark_views_app/report.html    79
-rw-r--r--  android_benchmark_views_app/views.py                                             10
3 files changed, 60 insertions(+), 204 deletions(-)
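
The new report.html (below) iterates over run.get_test_averages, a helper that is not part of this diff. As a rough sketch only, assuming the dashboard_app TestResult model exposes a measurement field and a test_case foreign key, and that BenchmarkRun has a test_runs relation, such a helper could aggregate averages and standard deviations directly in the ORM (all names here are inferred from the template and are assumptions, not the actual implementation). The bigger-is-better labelling (b_is_b_str) the template also needs is sketched after the deleted benchmark_run.py below.

    # Hypothetical sketch, not part of this commit: a get_test_averages()
    # helper in the shape report.html expects. Field and relation names
    # (test_runs, test_results, measurement, test_case) are assumptions.
    from django.db.models import Avg, StdDev

    class TestAverages(object):
        """Per-test container: .test plus .test_result_averages rows."""
        def __init__(self, test, test_result_averages):
            self.test = test
            self.test_result_averages = test_result_averages

    def get_test_averages(benchmark_run):
        averages = []
        for test_run in benchmark_run.test_runs.all():  # assumed related name
            # One dict per test case:
            # {'test_case__test_case_id': ..., 'average': ..., 'std_dev': ...}
            rows = (test_run.test_results
                    .values('test_case__test_case_id')
                    .annotate(average=Avg('measurement'),
                              std_dev=StdDev('measurement')))
            averages.append(TestAverages(test_run.test.test_id, list(rows)))
        return averages
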
diff --git a/android_benchmark_views_app/benchmark_run.py b/android_benchmark_views_app/benchmark_run.py
deleted file mode 100644
index e41fa8f..0000000
--- a/android_benchmark_views_app/benchmark_run.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/python
-
-import numpy
-import sys
-
-class TestResultAverages:
- def __init__(self, test_case_id):
- self.name = test_case_id
- self.measurements = []
-
- def add_measurement(self, m):
- self.measurements.append(m)
-
- def get_avg(self):
- return numpy.average(self.measurements)
-
- def get_std(self):
- return numpy.std(self.measurements)
-
-class TestRunAverages:
- def __init__(self, test_id):
- self.name = test_id
- self.test_results = []
-
- def bigger_is_better(self):
- if self.name == 'skia':
- return False
- return True
-
- def b_is_b_str(self):
- if self.name == 'Totals':
- return ''
-
- if self.bigger_is_better():
- return "Bigger is better"
- return "Smaller is better"
-
- def _get_results(self, test_case_id):
- ''' finds the existing test_case_id results or creates one'''
- for tr in self.test_results:
- if tr.name == test_case_id:
- return tr
-
- tr = TestResultAverages(test_case_id)
- self.test_results.append(tr)
- return tr
-
- def add_result(self, test_case_id, measurement):
- tr = self._get_results(test_case_id)
- tr.add_measurement(measurement)
-
-def get_totals_default(measurements):
- total = 0.0
- for m in measurements:
- total += m.get_avg()
- return total
-
-def get_v8_totals(measurements):
- return measurements[-1].get_avg()
-
-def add_totals(results):
- ''' prepends a summary of totals to the list '''
- thismodule = sys.modules[__name__]
-
- tra = TestRunAverages('Totals')
- for tr in results:
- fname = "get_%s_totals" % tr.name
- func = getattr(thismodule, fname, get_totals_default)
- total = func(tr.test_results)
- tra.add_result(tr.name, total)
-
- results.insert(0, tra)
-
-def get_test_run(results, test_id):
- for tr in results:
- if tr.name == test_id:
- return tr
- tr = TestRunAverages(test_id)
- results.append(tr)
- return tr
-
-def get_summary(test_run):
- ''' returns an array of TestRunAverages'''
- results = []
-
- for tr in test_run.get_test_results():
- tra = get_test_run(results, tr.test.test_id)
-
- for result in tr.test_run.get_results():
- tra.add_result(result.test_case.test_case_id, float(result.measurement))
-
- add_totals(results)
-
- return results
-
-def sanity_check(results):
- i = 0
- for tra in results[0]:
- for r in results:
- if r[i].name != tra.name:
- raise Exception("test result orders don't match")
-
- j = 0
- for tr in tra.test_results:
- if r[i].test_results[j].name != tr.name:
- raise Exception("test cases don't match for %s" % tra.name)
- j += 1
- i += 1
-
-class CombinedResult:
- ''' holds results in arrays that can be used by pyplot'''
- def __init__(self, test_case_id):
- self.name = test_case_id
- self.measurements = []
- self.std_devs = []
- self.labels = []
-
- def add_result(self, test_result_avg):
- self.measurements.append(test_result_avg.get_avg())
- self.std_devs.append(test_result_avg.get_std())
- self.labels.append(test_result_avg.name)
-
-
-class CombinedRun:
- def __init__(self, tra, run_labels):
- self.name = tra.name
- self.test_results = []
- self.b_is_b_str = tra.b_is_b_str()
- self.run_labels = run_labels
-
- def _get_results(self, test_case_id):
- ''' finds the existing test_case_id results or creates one'''
- for tr in self.test_results:
- if tr.name == test_case_id:
- return tr
-
- tr = CombinedResult(test_case_id)
- self.test_results.append(tr)
- return tr
-
- def add_test_result(self, test_result_avg):
- cr = self._get_results(test_result_avg.name)
- cr.add_result(test_result_avg)
-
- def as_flot_runs(self):
- vals = []
- for i in range(len(self.test_results[0].measurements)):
- data = []
- j = 1
- for res in self.test_results:
- data.append( [j, res.measurements[i], res.std_devs[i]/2] )
- j+=1
- vals.append( {'label': self.run_labels[i], 'data': data} )
- return vals
-
-def get_combined_run(run_labels, results, tra):
- for cr in results:
- if cr.name == tra.name:
- return cr
- cr = CombinedRun(tra, run_labels)
- results.append(cr)
- return cr
-
-def combine_summaries(run_labels, summaries):
- sanity_check(summaries)
-
- combined_results = []
-
- for results in summaries:
- for test_run_avgs in results:
- cr = get_combined_run(run_labels, combined_results, test_run_avgs)
- for test_result_avgs in test_run_avgs.test_results:
- cr.add_test_result(test_result_avgs)
-
- return combined_results
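
The updated report.html still renders {{main_ta.b_is_b_str}}, although the only implementation of that labelling lived in the module deleted above, so the logic presumably moves next to get_test_averages. A minimal, purely illustrative sketch that preserves the rule from the deleted code (the actual location and names are assumptions):

    # Hypothetical helpers, not in this diff: keep the bigger-is-better
    # labelling from the deleted benchmark_run.py available to report.html.
    def bigger_is_better(test_id):
        # In the deleted module, only 'skia' was scored as smaller-is-better.
        return test_id != 'skia'

    def b_is_b_str(test_id):
        if test_id == 'Totals':
            return ''
        return 'Bigger is better' if bigger_is_better(test_id) else 'Smaller is better'
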
diff --git a/android_benchmark_views_app/templates/android_benchmark_views_app/report.html b/android_benchmark_views_app/templates/android_benchmark_views_app/report.html
index 6e02d0d..2b05403 100644
--- a/android_benchmark_views_app/templates/android_benchmark_views_app/report.html
+++ b/android_benchmark_views_app/templates/android_benchmark_views_app/report.html
@@ -13,6 +13,52 @@
radius: 0, errorbars: "y",
yerr: {show:true, upperCap: "-", lowerCap: "-", radius: 5}
};
+
+ var bruns = []; //benchmark runs array
+ var tra = {}; //tra - test run averages
+{% for run in runs %}
+ bruns.push('{{run.label}}');
+ tra['{{run.label}}'] = {
+ {% for ta in run.get_test_averages %}
+ '{{ta.test}}': [
+ {% for tra in ta.test_result_averages %}
+ {'test': '{{tra.test_case__test_case_id}}', 'average': {{tra.average}}, 'std_dev': {{tra.std_dev}}},
+ {% endfor %}
+ ],
+ {% endfor %}
+ };
+{% endfor %}
+
+function get_test_result_averages(test_run, test) {
+ var results = []
+ var avgs = tra[test_run][test];
+ for (var i=0; i < avgs.length; i++)
+ results.push([(i+1), avgs[i]['average'], avgs[i]['std_dev']]);
+ return results;
+}
+
+function get_test_result_average(test_run, test, test_result) {
+ averages = tra[test_run][test];
+ for(i = 0; i < averages.length; i++) {
+ if( averages[i]['test'] == test_result )
+ return averages[i]['average'];
+ }
+ alert("ERROR missing test result for: " + test_run + ": " + test + "->" + test_result);
+ return 0;
+}
+
+function populate_table(test, tableid) {
+ //use the test results order from the first run to define what we show
+ main_test_results = tra[bruns[0]][test];
+ for(var i=0; i< main_test_results.length; i++) {
+ tr = main_test_results[i]['test'];
+ cols = [tr];
+ for( j = 0; j < bruns.length; j++)
+ cols.push(get_test_result_average(bruns[j], test, tr));
+ row = '<tr><td>' + cols.join('<td>') + '</td></tr>';
+ $(tableid +' > tbody:last').append(row);
+ }
+}
</script>
{% endblock %}
{% block sidebar %}
@@ -27,18 +73,19 @@
{% block content %}
<h2>Android Toolchain Benchmark Results for the {{report.series}} Engineering Cycle</h2>
- {% for combined_run in combined_results %}
+ {% for main_ta in runs.0.get_test_averages %}
<script type="text/javascript" charset="utf-8">
$(document).ready(function() {
- oTable = $('#{{combined_run.name}}-table').dataTable({
+ populate_table('{{main_ta.test}}', '#{{main_ta.test}}-table');
+ oTable = $('#{{main_ta.test}}-table').dataTable({
"bJQueryUI": true,
"bPaginate": false,
"aaSorting": [],});
- h = "<div class='dataTables_info'><span class='bmark_run'>{{combined_run.name}}</span> - <span class='bisb'>{{combined_run.b_is_b_str}}</span></div>";
- $("#{{combined_run.name}}-table_wrapper").find('div.fg-toolbar:first').prepend(h);
+ h = "<div class='dataTables_info'><span class='bmark_run'>{{main_ta.test}}</span> - <span class='bisb'>{{main_ta.b_is_b_str}}</span></div>";
+ $("#{{main_ta.test}}-table_wrapper").find('div.fg-toolbar:first').prepend(h);
});
</script>
- <table class="demo_jui display" id="{{combined_run.name}}-table">
+ <table class="demo_jui display" id="{{main_ta.test}}-table">
<thead>
<tr>
<th>Test case</th>
@@ -47,16 +94,10 @@
{% endfor %}
</tr>
</thead>
- {% for result in combined_run.test_results %}
- <tr>
- <td>{{result.name}}</td>
- {% for m in result.measurements %}
- <td>{{m}}</td>
- {% endfor %}
- </tr>
- {% endfor %}
+ <tbody>
+ </tbody>
</table>
- <div id="placeholder-{{combined_run.name}}" style="width:600px;height:350px"></div>
+ <div id="placeholder-{{main_ta.test}}" style="width:600px;height:350px"></div>
<script type="text/javascript" charset="utf-8">
$(document).ready(function() {
var options = {
@@ -68,23 +109,23 @@
},
xaxis: {
ticks: [
- {% for result in combined_run.test_results %}
- [{{forloop.counter}},'{{result.name}}'],
+ {% for tra in main_ta.test_result_averages %}
+ [{{forloop.counter}},'{{tra.test_case__test_case_id}}'],
{% endfor %}
],
labelAngle:-45,
},
};
var data = [
- {% for run in combined_run.as_flot_runs %}
+ {% for run in runs %}
{
label: '{{run.label}}',
- data: {{run.data}},
+ data: get_test_result_averages('{{run.label}}', '{{main_ta.test}}'),
points: data_points
},
{% endfor %}
];
- $.plot($("#placeholder-{{combined_run.name}}"), data, options);
+ $.plot($("#placeholder-{{main_ta.test}}"), data, options);
});
</script>
<br/>
diff --git a/android_benchmark_views_app/views.py b/android_benchmark_views_app/views.py
index a33adf8..47ed6b7 100644
--- a/android_benchmark_views_app/views.py
+++ b/android_benchmark_views_app/views.py
@@ -21,8 +21,6 @@ from django.template import RequestContext
import dashboard_app.views
-import android_benchmark_views_app.benchmark_run as benchmark_run
-
from android_benchmark_views_app.models import (
BenchmarkReport,
BenchmarkRun,
@@ -56,18 +54,10 @@ def report(request, series):
br = get_object_or_404(BenchmarkReport, series=series)
runs = BenchmarkRun.objects.filter(report=br)
- summaries = []
- runlbls = []
- for run in runs:
- summaries.append(benchmark_run.get_summary(run))
- runlbls.append(run.label)
- combined_results = benchmark_run.combine_summaries(runlbls, summaries)
-
return render_to_response(
"android_benchmark_views_app/report.html", {
'report': br,
'runs': runs,
- 'combined_results': combined_results,
'bread_crumb_trail': BreadCrumbTrail.leading_to(
report, series=series)