author    Andy Doan <andy.doan@linaro.org>  2012-02-03 17:02:28 -0600
committer Andy Doan <andy.doan@linaro.org>  2012-02-07 15:53:31 -0800
commit    858f3d555e8c5d1cdd3b8eb5a98722e19a833cab (patch)
tree      24e582326e42c8cf8cd6e70fb72b82906f8b78fd
parent    f2d8e3e07fa4f904552a4914df3648abf20bcedc (diff)
download  android_benchmark_views-858f3d555e8c5d1cdd3b8eb5a98722e19a833cab.tar.gz
add some comments and remove unused function
Remove an unused function.
Add comments to help describe what the model objects do.

Signed-off-by: Andy Doan <andy.doan@linaro.org>
Change-Id: I8dc152408090fdd53a93c32c8ea0bb8fae985b03
-rw-r--r--  android_benchmark_views_app/helpers.py   6
-rw-r--r--  android_benchmark_views_app/models.py   22
2 files changed, 24 insertions, 4 deletions
diff --git a/android_benchmark_views_app/helpers.py b/android_benchmark_views_app/helpers.py
index 44f8a76..3bf12b4 100644
--- a/android_benchmark_views_app/helpers.py
+++ b/android_benchmark_views_app/helpers.py
@@ -70,7 +70,11 @@ def _add_totals(test_averages):
 def benchmark_run_test_averages(benchmarkrun):
     '''
-    returns a data structure suitable for the run.html template in the
+    A benchmark run consists of multiple runs of a test like "v8".
+    This function queries the test results in the benchmark run to
+    build up average/deviation metrics for each test result.
+
+    Returns a data structure suitable for the run.html template in the
     format like:
     [
     {'test': 'v8',
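(For context: below is a minimal, self-contained sketch of the kind of per-test average/deviation rows the new docstring describes. The (test, value) input shape, the 'average'/'deviation' row keys, and the use of a population standard deviation are assumptions for illustration, not this app's actual implementation.)

# Hypothetical sketch only: not the app's real query paths.
import math
from collections import defaultdict

def sketch_test_averages(measurements):
    """measurements: iterable of (test_name, value) pairs, values as floats."""
    by_test = defaultdict(list)
    for test, value in measurements:
        by_test[test].append(value)
    rows = []
    for test, values in sorted(by_test.items()):
        avg = sum(values) / len(values)
        # population standard deviation of this test's measurements
        dev = math.sqrt(sum((v - avg) ** 2 for v in values) / len(values))
        rows.append({'test': test, 'average': avg, 'deviation': dev})
    return rows

# sketch_test_averages([('v8', 1510.0), ('v8', 1495.0)])
# -> [{'test': 'v8', 'average': 1502.5, 'deviation': 7.5}]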
diff --git a/android_benchmark_views_app/models.py b/android_benchmark_views_app/models.py
index c8d40bd..02da9eb 100644
--- a/android_benchmark_views_app/models.py
+++ b/android_benchmark_views_app/models.py
@@ -22,6 +22,16 @@ from dashboard_app import models as lava_models
 import android_benchmark_views_app.helpers as helpers
 class BenchmarkReport(models.Model):
+    """
+    Model for representing a monthly Android Toolchain Benchmark.
+
+    Monthly benchmarks compare the results of Android benchmarks run
+    against versions of Android built with each toolchain under test.
+
+    The series is normally something like "2012.02" for the 2012.02
+    engineering cycle comparisons.
+    """
+
     series = models.CharField(max_length=16, unique=True)
     comments = models.TextField(blank=True, null=True)
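(Aside: since series is unique, a monthly report can be fetched directly by its engineering-cycle string. This query is hypothetical, not code from the app.)

# Hypothetical lookup; "2012.02" is the example series from the docstring.
report = BenchmarkReport.objects.get(series='2012.02')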
@@ -35,6 +45,14 @@ class BenchmarkReport(models.Model):
         return self.series
 class BenchmarkRun(models.Model):
+    """
+    Models one benchmark run performed with a build of a certain toolchain.
+
+    The label describes the toolchain used for the run, e.g.
+    "linaro-4.6" or "android-4.4". The results of the run are a
+    typical LAVA bundle.
+    """
+
     label = models.CharField(max_length=16)
     report = models.ForeignKey(
         BenchmarkReport,
@@ -43,9 +61,6 @@ class BenchmarkRun(models.Model):
         lava_models.Bundle,
         related_name='benchmark_runs')
-    def get_test_results(self):
-        return lava_models.TestResult.objects.filter(test_run__bundle=self.bundle)
-
     @models.permalink
     def get_absolute_url(self):
         return ('android_benchmark_views_app.views.run_summary',
@@ -55,5 +70,6 @@ class BenchmarkRun(models.Model):
         return u'%s-%s' % (self.report.series, self.label)
     def get_test_averages(self):
+        """See helpers.benchmark_run_test_averages for a description."""
         return helpers.benchmark_run_test_averages(self)
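(To tie the two models together, a hedged usage sketch, e.g. from a Django shell. some_lava_bundle is a placeholder for a dashboard_app.models.Bundle; none of this is taken from the app's own code or tests.)

# Hypothetical usage of the models changed in this commit.
report = BenchmarkReport.objects.create(series='2012.02')
run = BenchmarkRun.objects.create(
    report=report,
    label='linaro-4.6',        # toolchain used for this run
    bundle=some_lava_bundle)   # placeholder LAVA bundle of results

# Per-test average/deviation rows for the run.html template;
# only the 'test' key is confirmed by the docstring above.
for row in run.get_test_averages():
    print row['test']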