aboutsummaryrefslogtreecommitdiff
path: root/bestflags/testing_batch.py
diff options
context:
space:
mode:
authorYuheng Long <yuhenglong@google.com>2013-08-07 08:48:05 -0700
committerChromeBot <chrome-bot@google.com>2013-08-07 22:14:42 -0700
commit057ad5c77ef0ea44ee260718eb6d30afef2f7f83 (patch)
tree75dc92c063a32304259fc834382c9cf7fa314602 /bestflags/testing_batch.py
parent570b2ce931d40a335f3b02ed859b376040b5fb67 (diff)
downloadtoolchain-utils-057ad5c77ef0ea44ee260718eb6d30afef2f7f83.tar.gz
Add the Genetic Algorithm.
BUG=None TEST=unit testings for the pipeline stage, pipeline workers, generation, steering, task, flag, hill climbing and genetic algorithm. Change-Id: I2864d6a6859fff43bc2d3afb059c672c54bbe385 Reviewed-on: https://gerrit-int.chromium.org/42472 Reviewed-by: Simon Que <sque@google.com> Commit-Queue: Yuheng Long <yuhenglong@google.com> Tested-by: Yuheng Long <yuhenglong@google.com>
Diffstat (limited to 'bestflags/testing_batch.py')
-rw-r--r--bestflags/testing_batch.py80
1 files changed, 75 insertions, 5 deletions
diff --git a/bestflags/testing_batch.py b/bestflags/testing_batch.py
index 8ec2cd9d..ad1ab4b7 100644
--- a/bestflags/testing_batch.py
+++ b/bestflags/testing_batch.py
@@ -19,6 +19,8 @@ import unittest
import flags
from flags import Flag
from flags import FlagSet
+from genetic_algorithm import GAGeneration
+from genetic_algorithm import GATask
from hill_climb_best_neighbor import HillClimbingBestBranch
import pipeline_process
from steering import Steering
@@ -33,6 +35,12 @@ NUM_FLAGS = 5
# The value range of the flags.
FLAG_RANGES = 10
+# The following variables are meta data for the Genetic Algorithm.
+STOP_THRESHOLD = 20
+NUM_CHROMOSOMES = 10
+NUM_TRIALS = 20
+MUTATION_RATE = 0.03
+
def _GenerateRandomRasks(specs):
"""Generate a task that has random values.
@@ -92,6 +100,35 @@ def _GenerateNoFlagTask():
return set([Task(FlagSet([]))])
+def _GenerateRandomGATasks(specs, num_tasks, num_trials):
+ """Generate a set of tasks for the Genetic Algorithm.
+
+ Args:
+ specs: A list of spec from which the flag set is created.
+ num_tasks: number of tasks that should be generated.
+    num_trials: the maximum number of tries that should be attempted to
+      generate the set of tasks.
+
+ Returns:
+ A set of randomly generated tasks.
+ """
+
+ tasks = set([])
+
+ total_trials = 0
+ while len(tasks) < num_tasks and total_trials < num_trials:
+ new_flag = FlagSet([Flag(spec) for spec in specs if random.randint(0, 1)])
+ new_task = GATask(new_flag)
+
+ if new_task in tasks:
+ total_trials += 1
+ else:
+ tasks.add(new_task)
+ total_trials = 0
+
+ return tasks
+
+
def _ComputeCost(cost_func, specs, flag_set):
"""Compute the mock cost of the flag_set using the input cost function.
@@ -148,7 +185,9 @@ def _TestAlgorithm(cost_func, specs, generations, best_result):
specs: All the specs that are used in the algorithm. This is used to check
whether certain flag is disabled in the flag_set dictionary.
generations: The initial generations to be evaluated.
- best_result: The expected best result of the algorithm.
+ best_result: The expected best result of the algorithm. If best_result is
+ -1, the algorithm may or may not return the best value. Therefore, no
+ assertion will be inserted.
"""
# Set up the utilities to test the framework.
@@ -184,16 +223,19 @@ def _TestAlgorithm(cost_func, specs, generations, best_result):
output_queue.put(task)
pp_steer.join()
- assert best_result == result
+
+ # Only do this test when best_result is not -1.
+ if best_result != -1:
+ assert best_result == result
class FlagAlgorithms(unittest.TestCase):
"""This class test the FlagSet class."""
def testBestHillClimb(self):
- """Test the equal method of the Class FlagSet.
+ """Test the best hill climb algorithm.
- Two FlagSet instances are equal if all their flags are equal.
+ Test whether it finds the best results as expected.
"""
# Initiate the build/test command and the log directory.
@@ -217,7 +259,7 @@ class FlagAlgorithms(unittest.TestCase):
# This test uses a cost function that is the negative of the previous cost
# function. Therefore, the best result should be found in task with all the
# flags enabled.
- cost_function = '-sum(values[0:len(values)])'
+ cost_function = 'sys.maxint - sum(values[0:len(values)])'
all_flags = list(generation_tasks)[0].GetFlags()
cost = _ComputeCost(cost_function, specs, all_flags)
@@ -230,6 +272,34 @@ class FlagAlgorithms(unittest.TestCase):
# 0, i.e., when all the flags are disabled.
_TestAlgorithm(cost_function, specs, generations, cost)
+ def testGeneticAlgorithm(self):
+ """Test the Genetic Algorithm.
+
+    Do functional testing here and see how well it scales.
+ """
+
+ # Initiate the build/test command and the log directory.
+ Task.InitLogCommand(None, None, 'output')
+
+ # Generate the testing specs.
+ mock_test_file = 'scale_mock_test'
+ _GenerateTestFlags(NUM_FLAGS, FLAG_RANGES, mock_test_file)
+ specs = flags.ReadConf(mock_test_file)
+
+    # Initialize the metadata for the Genetic Algorithm.
+ GAGeneration.InitMetaData(STOP_THRESHOLD, NUM_CHROMOSOMES, NUM_TRIALS,
+ specs, MUTATION_RATE)
+
+ # Generate the initial generations.
+ generation_tasks = _GenerateRandomGATasks(specs, NUM_CHROMOSOMES,
+ NUM_TRIALS)
+ generations = [GAGeneration(generation_tasks, set([]), 0)]
+
+ # Test the algorithm.
+ _TestAlgorithm('sum(values[0:len(values)])', specs, generations, -1)
+ cost_func = 'sys.maxint - sum(values[0:len(values)])'
+ _TestAlgorithm(cost_func, specs, generations, -1)
+
if __name__ == '__main__':
unittest.main()