author    Yuheng Long <yuhenglong@google.com>  2013-08-02 14:27:45 -0700
committer ChromeBot <chrome-bot@google.com>    2013-08-07 12:43:42 -0700
commit    ccfaf2f382815945c1c883e1cfceb2eba55e28b0 (patch)
tree      3e05a3bc1b30c0118ee11d705ba94ec7ceb6c999 /bestflags
parent    6aa8528c03697a1f28186c574d512c19935676a5 (diff)
download  toolchain-utils-ccfaf2f382815945c1c883e1cfceb2eba55e28b0.tar.gz
Add the best branching hill climbing algorithm.
BUG=None
TEST=unit tests for the pipeline stage, pipeline workers, generation, steering, task, flag and hill climbing.

Change-Id: Ifd181f45c4b82f5fb77b0d4946757954aa806d33
Reviewed-on: https://gerrit-int.chromium.org/42284
Tested-by: Yuheng Long <yuhenglong@google.com>
Reviewed-by: Simon Que <sque@google.com>
Commit-Queue: Yuheng Long <yuhenglong@google.com>
Diffstat (limited to 'bestflags')
-rw-r--r--  bestflags/flags.py                        27
-rw-r--r--  bestflags/flags_util.py                   77
-rw-r--r--  bestflags/generation.py                    9
-rw-r--r--  bestflags/hill_climb_best_neighbor.py     99
-rw-r--r--  bestflags/pipeline_process_test.py         3
-rw-r--r--  bestflags/task.py                        120
-rw-r--r--  bestflags/task_test.py                    30
-rw-r--r--  bestflags/testing_batch.py               235
8 files changed, 533 insertions(+), 67 deletions(-)
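
For orientation, here is a self-contained toy sketch of the best-neighbor hill climbing idea this change implements: evaluate every neighbor of the current point and move to the best one for as long as it improves. The tuple-of-integers flag representation and the sum cost function are illustrative only and are not part of the framework.

# Toy sketch of best-neighbor hill climbing; flag representation and cost
# function are made up for brevity.
def neighbors(point):
  """Yield every assignment differing from `point` by +/-1 in one position."""
  for index, value in enumerate(point):
    for delta in (-1, 1):
      if value + delta >= 0:
        yield point[:index] + (value + delta,) + point[index + 1:]


def hill_climb_best_neighbor(start, cost):
  """Move to the best neighbor until no neighbor improves the cost."""
  current = start
  while True:
    best = min(neighbors(current), key=cost)
    if cost(best) >= cost(current):
      return current
    current = best


if __name__ == '__main__':
  # With cost = sum of the values, the optimum is all zeros.
  print(hill_climb_best_neighbor((3, 1, 4), sum))
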
diff --git a/bestflags/flags.py b/bestflags/flags.py
index b1f337ab..a89f6301 100644
--- a/bestflags/flags.py
+++ b/bestflags/flags.py
@@ -39,6 +39,30 @@ _FLAG_FILLOUT_VALUE_RE = re.compile(r'\[([^\]]*)\]')
rx = re.compile(r'\[(?P<start>\d+)-(?P<end>\d+)\]')
+# Search the spec for the numeric flag pattern.
+def Search(spec):
+ return rx.search(spec)
+
+
+def ReadConf(file_name):
+ """Parse the configuration file.
+
+ The configuration file contains one flag specification per line.
+
+ Args:
+ file_name: The name of the configuration file.
+
+ Returns:
+ A list of specs in the configuration file.
+ """
+
+ with open(file_name, 'r') as input_file:
+ lines = input_file.readlines()
+
+ return sorted([line.strip() for line in lines if line.strip()])
+ return []
+
+
class Flag(object):
"""A class representing a particular command line flag argument.
@@ -57,6 +81,9 @@ class Flag(object):
if value == -1:
result = rx.search(spec)
+ # The value of a boolean flag is 0.
+ value = 0
+
# If this is a numeric flag, a value is chosen within start and end, start
# inclusive and end exclusive.
if result:
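
As a quick illustration of the '[start-end]' spec syntax that the new Search() helper relies on, the snippet below exercises the same regular expression on two spec strings; the flag names are hypothetical, only the bracketed-range syntax comes from this change.

# Illustration of the numeric-spec pattern; the spec strings are hypothetical.
import re

rx = re.compile(r'\[(?P<start>\d+)-(?P<end>\d+)\]')

numeric_spec = '-finline-limit=[1-100]'  # numeric flag: value in [1, 100)
boolean_spec = '-fomit-frame-pointer'    # boolean flag: no bracketed range

match = rx.search(numeric_spec)
assert match and (match.group('start'), match.group('end')) == ('1', '100')
assert rx.search(boolean_spec) is None
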
diff --git a/bestflags/flags_util.py b/bestflags/flags_util.py
new file mode 100644
index 00000000..01cc8d66
--- /dev/null
+++ b/bestflags/flags_util.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility functions to explore the neighbor flags.
+
+Part of the Chrome build flags optimization.
+"""
+
+__author__ = 'yuhenglong@google.com (Yuheng Long)'
+
+
+import flags
+from flags import Flag
+
+
+def ClimbNext(flags_dict, climb_spec):
+ """Get the flag settings that differ from flags_dict only in climb_spec.
+
+ Args:
+ flags_dict: The dictionary containing the original flags whose neighbors are
+ to be explored.
+ climb_spec: The spec in flags_dict that is to be changed.
+
+ Returns:
+ A dictionary of neighbor flags.
+ """
+
+ result = flags.Search(climb_spec)
+
+ # If the flags do not contain the spec.
+ if climb_spec not in flags_dict:
+ results = flags_dict.copy()
+
+ if result:
+ # Numeric flags.
+ results[climb_spec] = Flag(climb_spec, int(result.group('start')))
+ else:
+ # Boolean flags.
+ results[climb_spec] = Flag(climb_spec)
+
+ return [results]
+
+ # The flags contain the spec.
+ if not result:
+ # Boolean flags.
+ results = flags_dict.copy()
+ del results[climb_spec]
+ return [results]
+
+ # Numeric flags.
+ flag = flags_dict[climb_spec]
+
+ # The value of the flag having spec.
+ value = flag.GetValue()
+ results = []
+
+ if value + 1 < int(result.group('end')):
+ # If the value is not the end value, explore the value that is 1 larger than
+ # the current value.
+ neighbor = flags_dict.copy()
+ neighbor[climb_spec] = Flag(climb_spec, value + 1)
+ results.append(neighbor)
+
+ if value > int(result.group('start')):
+ # If the value is not the start value, explore the value that is 1 less
+ # than the current value.
+ neighbor = flags_dict.copy()
+ neighbor[climb_spec] = Flag(climb_spec, value - 1)
+ results.append(neighbor)
+ else:
+ # Delete the value.
+ neighbor = flags_dict.copy()
+ del neighbor[climb_spec]
+ results.append(neighbor)
+
+ return results
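
A possible usage sketch for ClimbNext, assuming it is run from inside bestflags/ so that the flags and flags_util modules import; the spec string is hypothetical. For a numeric spec the neighbors bump the value by one in each direction (or drop the flag at the lower bound), and for an absent or boolean spec the single neighbor toggles the flag.

# Usage sketch; assumes the bestflags modules are importable and uses a
# hypothetical spec string.
from flags import Flag
import flags_util

spec = '-finline-limit=[1-10]'
current = {spec: Flag(spec, 5)}

# Two neighbors here: the same flag with value 6 and with value 4.
for neighbor in flags_util.ClimbNext(current, spec):
  print([flag.GetValue() for flag in neighbor.values()])

# If the spec is absent, the single neighbor enables it at the start value.
print(len(flags_util.ClimbNext({}, spec)))
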
diff --git a/bestflags/generation.py b/bestflags/generation.py
index 7f0e94a1..0bc2f57c 100644
--- a/bestflags/generation.py
+++ b/bestflags/generation.py
@@ -107,19 +107,24 @@ class Generation(object):
return True
def Improve(self):
- """True if this generation has improvement over its parent generation.
+ """True if this generation has improvement upon its parent generation.
Raises:
NoneOverridingError: The subclass should override this method.
"""
raise NoneOverridingError('Must be implemented by child class')
- def Next(self):
+ def Next(self, _):
"""Calculate the next generation.
This is the core of the framework implementation. It must be overridden by
the concrete subclass to implement algorithm specific generations.
+ Args:
+ _: A set of tasks that have been generated before. The overridden method
+ in the subclasses can use this so as not to generate tasks that have been
+ generated before.
+
Returns:
A set of new generations.
diff --git a/bestflags/hill_climb_best_neighbor.py b/bestflags/hill_climb_best_neighbor.py
new file mode 100644
index 00000000..5bc3f7ed
--- /dev/null
+++ b/bestflags/hill_climb_best_neighbor.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A variation of the hill climbing algorithm.
+
+Part of the Chrome build flags optimization.
+
+This algorithm explores all the neighbors of the current task. If at least one
+neighbor gives better performance than the current task, it explores the best
+neighbor.
+"""
+
+__author__ = 'yuhenglong@google.com (Yuheng Long)'
+
+
+from flags import FlagSet
+import flags_util
+from generation import Generation
+from task import Task
+
+
+class HillClimbingBestBranch(Generation):
+ """A variation of the hill climbing algorithm.
+
+ Given a task, it explores all its neighbors and picks the best neighbor for
+ the next iteration.
+ """
+
+ def __init__(self, exe_pool, parents, specs):
+ """Set up the tasks set of this generation.
+
+ Args:
+ exe_pool: A set of tasks to be run.
+ parents: A set of tasks to be used to check whether their neighbors have
+ improved upon them.
+ specs: A list of specs to explore. The spec specifies the flags that can
+ be changed to find neighbors of a task.
+ """
+
+ Generation.__init__(self, exe_pool, parents)
+ self._specs = specs
+
+ def Improve(self):
+ """True if this generation has improvement over its parent generation.
+
+ Returns:
+ True if the best neighbor improves upon the parent task.
+ """
+
+ # Find the best neighbor.
+ best_task = None
+ for task in self._exe_pool:
+ if not best_task or task.Improve(best_task):
+ best_task = task
+
+ if not best_task:
+ return False
+
+ # The first generation may not have a parent generation.
+ parents = list(self._candidate_pool)
+ if parents:
+ assert len(parents) == 1
+ self._next_task = best_task
+ # If the best neighbor improves upon the parent task.
+ return best_task.Improve(parents[0])
+
+ self._next_task = best_task
+ return True
+
+ def Next(self, cache):
+ """Calculate the next generation.
+
+ The best neighbor b of the current task is the parent of the next
+ generation. The neighbors of b will be the set of tasks to be evaluated
+ next.
+
+ Args:
+ cache: A set of tasks that have been generated before.
+
+ Returns:
+ A set of new generations.
+ """
+
+ # The best neighbor.
+ current_task = self._next_task
+ flag_set = current_task.GetFlags()
+
+ # The neighbors of the best neighbor.
+ children_tasks = set([])
+ for spec in self._specs:
+ for next_flag in flags_util.ClimbNext(flag_set.GetFlags(), spec):
+ new_task = Task(FlagSet(next_flag.values()))
+
+ if new_task not in cache:
+ children_tasks.add(new_task)
+
+ return [HillClimbingBestBranch(children_tasks, set([current_task]),
+ self._specs)]
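
A simplified, hypothetical driver showing how Improve() and Next(cache) are intended to interact; in the framework this role is played by the Steering process and the build/test pipeline rather than a plain loop, and the Pool() accessor and evaluate() callback below are assumptions.

# Hypothetical driver loop; Pool() and evaluate() are assumptions, while
# Improve() and Next(cache) are the methods added in this change.
def drive(generation, evaluate):
  """Follow one hill-climbing lineage until no neighbor improves."""
  cache = set()
  while True:
    for work_task in generation.Pool():   # assumed accessor for the task set
      evaluate(work_task)                 # fill in build and test results
      cache.add(work_task)
    if not generation.Improve():
      return
    # The best neighbor becomes the parent of the next generation; tasks in
    # `cache` are skipped so nothing is evaluated twice.
    generation = generation.Next(cache)[0]
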
diff --git a/bestflags/pipeline_process_test.py b/bestflags/pipeline_process_test.py
index 1d5af49f..08b4e87d 100644
--- a/bestflags/pipeline_process_test.py
+++ b/bestflags/pipeline_process_test.py
@@ -49,9 +49,6 @@ class PipelineProcessTest(unittest.TestCase):
also be passed to the next pipeline stage via the output queue.
"""
- def setUp(self):
- pass
-
def testRun(self):
"""Test the run method.
diff --git a/bestflags/task.py b/bestflags/task.py
index 4391c41d..77901602 100644
--- a/bestflags/task.py
+++ b/bestflags/task.py
@@ -26,6 +26,23 @@ TEST_STAGE = 2
# Message indicating that the build or test failed.
ERROR_STRING = 'error'
+# The maximum number of tries a build can have. Some compilations may fail due
+# to unexpected environment circumstances. This variable defines how many tries
+# the build should attempt before giving up.
+BUILD_TRIES = 3
+
+# The maximum number of tries a test can have. Some tests may fail due to
+# unexpected environment circumstances. This variable defines how many tries
+# the test should attempt before giving up.
+TEST_TRIES = 3
+
+
+# Create the directory containing the given file if it does not already exist.
+def _CreateDirectory(file_name):
+ directory = os.path.dirname(file_name)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
class Task(object):
"""A single reproducing entity.
@@ -34,41 +51,46 @@ class Task(object):
flag set, the image, the check sum of the image and the cost.
"""
- def __init__(self, flag_set, build_command, test_command, log_directory,
- build_tries, test_tries):
- """Set up the optimization flag selection for this task.
+ # The command that will be used in the build stage to compile the tasks.
+ BUILD_COMMAND = None
+ # The command that will be used in the test stage to test the tasks.
+ TEST_COMMAND = None
+ # The directory to log the compilation and test results.
+ LOG_DIRECTORY = None
+
+ @staticmethod
+ def InitLogCommand(build_command, test_command, log_directory):
+ """Set up the build and test command for the task and the log directory.
This framework is generic. It lets the client specify application specific
compile and test methods by passing different build_command and
test_command.
Args:
- flag_set: The optimization flag set that is encapsulated by this task.
build_command: The command that will be used in the build stage to compile
this task.
test_command: The command that will be used in the test stage to test this
task.
log_directory: The directory to log the compilation and test results.
- build_tries: The maximum number of tries a build can have. Some
- compilations may fail due to unexpected environment circumstance. This
- variable defines how many tries the build should attempt before giving
- up.
- test_tries: The maximum number of tries a build can have. Some tests may
- fail due to unexpected environment circumstance. This variable defines
- how many tries the test should attempt before giving up.
+ """
+
+ Task.BUILD_COMMAND = build_command
+ Task.TEST_COMMAND = test_command
+ Task.LOG_DIRECTORY = log_directory
+
+ def __init__(self, flag_set):
+ """Set up the optimization flag selection for this task.
+
+ Args:
+ flag_set: The optimization flag set that is encapsulated by this task.
"""
self._flag_set = flag_set
- self._build_command = build_command
- self._test_command = test_command
- self._log_directory = log_directory
- self._build_tries = build_tries
- self._test_tries = test_tries
# A unique identifier that distinguishes this task from other tasks.
- self.task_identifier = uuid4()
+ self._task_identifier = uuid4()
- self._log_path = (self._log_directory, self.task_identifier)
+ self._log_path = (Task.LOG_DIRECTORY, self._task_identifier)
# Initiate the hash value. The hash value is used so as not to recompute it
# every time the hash method is called.
@@ -113,7 +135,7 @@ class Task(object):
"""
# Define the dictionary for different stage function lookup.
- get_identifier_functions = {BUILD_STAGE: self.GetFlags,
+ get_identifier_functions = {BUILD_STAGE: self.FormattedFlags,
TEST_STAGE: self.__GetCheckSum}
assert stage in get_identifier_functions
@@ -130,7 +152,7 @@ class Task(object):
# Define the dictionary for different stage function lookup.
get_result_functions = {BUILD_STAGE: self.__GetBuildResult,
- TEST_STAGE: self.__GetTestResult}
+ TEST_STAGE: self.GetTestResult}
assert stage in get_result_functions
@@ -185,13 +207,22 @@ class Task(object):
work_functions[stage]()
+ def FormattedFlags(self):
+ """Format the optimization flag set of this task.
+
+ Returns:
+ The formatted optimization flag set that is encapsulated by this task.
+ """
+ return str(self._flag_set.FormattedForUse())
+
def GetFlags(self):
"""Get the optimization flag set of this task.
Returns:
The optimization flag set that is encapsulated by this task.
"""
- return str(self._flag_set.FormattedForUse())
+
+ return self._flag_set
def __GetCheckSum(self):
"""Get the compilation image checksum of this task.
@@ -217,11 +248,11 @@ class Task(object):
# used to compile different tasks, these processes can use the identifier to
# write to different file.
flags = self._flag_set.FormattedForUse()
- command = '%s %s %s' % (self._build_command, ' '.join(flags),
- self.task_identifier)
+ command = '%s %s %s' % (Task.BUILD_COMMAND, ' '.join(flags),
+ self._task_identifier)
# Try build_tries number of times before confirming that the build fails.
- for _ in range(self._build_tries):
+ for _ in range(BUILD_TRIES):
# Execute the command and get the execution status/results.
p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@@ -264,11 +295,11 @@ class Task(object):
# The unique identifier is passed to the test command. If concurrent
# processes are used to compile different tasks, these processes can use the
# identifier to write to different file.
- command = '%s %s %s' % (self._test_command, self._image,
- self.task_identifier)
+ command = '%s %s %s' % (Task.TEST_COMMAND, self._image,
+ self._task_identifier)
# Try build_tries number of times before confirming that the build fails.
- for _ in range(self._test_tries):
+ for _ in range(TEST_TRIES):
p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
@@ -298,23 +329,12 @@ class Task(object):
return (self._checksum, self._build_cost, self._image, self._file_length,
self._text_length)
- def __GetTestResult(self):
+ def GetTestResult(self):
return self._exe_cost
def __SetTestResult(self, exe_cost):
self._exe_cost = exe_cost
- def __CreateDirectory(self, file_name):
- """Create a directory/file if it does not already exist.
-
- Args:
- file_name: The path of the directory/file to be created.
- """
-
- d = os.path.dirname(file_name)
- if not os.path.exists(d):
- os.makedirs(d)
-
def LogSteeringCost(self):
"""Log the performance results for the task.
@@ -324,7 +344,7 @@ class Task(object):
steering_log = '%s/%s/steering.txt' % self._log_path
- self.create_dir('%s/%s/' % steering_log)
+ _CreateDirectory(steering_log)
with open(steering_log, 'w') as out_file:
# Include the build and the test results.
@@ -346,7 +366,7 @@ class Task(object):
build_log = '%s/%s/build.txt' % self._log_path
- self.__CreateDirectory(build_log)
+ _CreateDirectory(build_log)
with open(build_log, 'w') as out_file:
build_result = (self._flag_set, self._build_cost, self._image,
@@ -363,10 +383,26 @@ class Task(object):
test_log = '%s/%s/build.txt' % self._log_path
- self.__CreateDirectory(test_log)
+ _CreateDirectory(test_log)
with open(test_log, 'w') as out_file:
test_result = (self._flag_set, self._checksum, self._exe_cost)
# Write out the result in the comma-separated format (CSV).
out_file.write('%s,%s,%s\n' % test_result)
+
+ def Improve(self, other):
+ """Compare the current task with another task.
+
+ Args:
+ other: The other task against which the current task is compared.
+
+ Returns:
+ True if this task has improvement upon the other task.
+ """
+
+ # The execution costs must have been initiated.
+ assert self._exe_cost is not None
+ assert other.GetTestResult() is not None
+
+ return self._exe_cost < other.GetTestResult()
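
With build_command, test_command and log_directory promoted to class-level state, a task is configured once and then constructed from a flag set alone. A minimal sketch of that flow, assuming the bestflags modules import; the commands and log directory are placeholders.

# Minimal set-up sketch; the commands and log directory are placeholders.
from flags import FlagSet
from task import Task

# Configure the build/test commands and the log directory once per run.
Task.InitLogCommand('./build.sh', './test.sh', 'example_logs')

# Tasks now take only the flag set; identifiers and log paths are derived.
empty_task = Task(FlagSet([]))
print(empty_task.GetFlags())   # the FlagSet itself, not a formatted string
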
diff --git a/bestflags/task_test.py b/bestflags/task_test.py
index 3777a097..a40e4ad4 100644
--- a/bestflags/task_test.py
+++ b/bestflags/task_test.py
@@ -26,11 +26,6 @@ RANDOM_BUILD_RESULT = 100
RANDOM_TESTRESULT = 100
-# Create task for unittestings that only uses the flag set field of the task.
-def _IdentifierTask(identifier):
- return Task(identifier, None, None, None, None, None)
-
-
class MockFlagSet(object):
"""This class emulates a set of flags.
@@ -71,18 +66,14 @@ class TaskTest(unittest.TestCase):
# Two tasks having the same flag set should be equivalent.
flag_sets = [MockFlagSet(flag) for flag in flags]
for flag_set in flag_sets:
- task0 = _IdentifierTask(flag_set)
- task1 = _IdentifierTask(flag_set)
-
- assert task0 == task1
+ assert Task(flag_set) == Task(flag_set)
# Two tasks having different flag set should be different.
for flag_set in flag_sets:
- task0 = _IdentifierTask(flag_set)
+ test_task = Task(flag_set)
other_flag_sets = [flags for flags in flag_sets if flags != flag_set]
for flag_set1 in other_flag_sets:
- task1 = _IdentifierTask(flag_set1)
- assert task0 != task1
+ assert test_task != Task(flag_set1)
def testHash(self):
"""Test the hash method of the task.
@@ -96,13 +87,12 @@ class TaskTest(unittest.TestCase):
flag_sets = [MockFlagSet(identifier, value) for value in range(NUM_FLAGS)]
for flag_set in flag_sets:
# The hash of a task is the same as the hash of its flag set.
- hash_task = _IdentifierTask(flag_set)
- h0 = hash(hash_task)
- assert h0 == flag_set.GetHash()
+ hash_task = Task(flag_set)
+ hash_value = hash(hash_task)
+ assert hash_value == flag_set.GetHash()
# The hash of a task does not change.
- h1 = hash(hash_task)
- assert h0 == h1
+ assert hash_value == hash(hash_task)
def testGetIdentifier(self):
"""Test the get identifier method of the task.
@@ -112,7 +102,7 @@ class TaskTest(unittest.TestCase):
flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)]
for flag_set in flag_sets:
- identifier_task = _IdentifierTask(flag_set)
+ identifier_task = Task(flag_set)
identifier = identifier_task.GetIdentifier(task.BUILD_STAGE)
@@ -127,7 +117,7 @@ class TaskTest(unittest.TestCase):
flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)]
for flag_set in flag_sets:
- result_task = _IdentifierTask(flag_set)
+ result_task = Task(flag_set)
# The get result method should return the same results as were set, in
# build stage. Currently, the build result is a 5-element tuple containing
@@ -161,7 +151,7 @@ class TaskTest(unittest.TestCase):
flag_sets = [MockFlagSet(flag) for flag in flags]
for flag_set in flag_sets:
- work_task = _IdentifierTask(flag_set)
+ work_task = Task(flag_set)
# The task has not been compiled nor tested.
assert not work_task.Done(task.TEST_STAGE)
diff --git a/bestflags/testing_batch.py b/bestflags/testing_batch.py
new file mode 100644
index 00000000..8ec2cd9d
--- /dev/null
+++ b/bestflags/testing_batch.py
@@ -0,0 +1,235 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Hill climbing unittest.
+
+Part of the Chrome build flags optimization.
+
+Test the variations of the hill climbing algorithms.
+"""
+
+__author__ = 'yuhenglong@google.com (Yuheng Long)'
+
+import multiprocessing
+import random
+import sys
+import unittest
+
+import flags
+from flags import Flag
+from flags import FlagSet
+from hill_climb_best_neighbor import HillClimbingBestBranch
+import pipeline_process
+from steering import Steering
+from task import BUILD_STAGE
+from task import Task
+from task import TEST_STAGE
+
+
+# The number of flags to be tested.
+NUM_FLAGS = 5
+
+# The value range of the flags.
+FLAG_RANGES = 10
+
+
+def _GenerateRandomTasks(specs):
+ """Generate a task that has random values.
+
+ Args:
+ specs: A list of spec from which the flag set is created.
+
+ Returns:
+ A set containing a task that has random values.
+ """
+
+ flag_set = []
+
+ for spec in specs:
+ result = flags.Search(spec)
+ if result:
+ # Numeric flags.
+ start = int(result.group('start'))
+ end = int(result.group('end'))
+
+ value = random.randint(start - 1, end - 1)
+ if value != start - 1:
+ # If the value falls in the range, this flag is enabled.
+ flag_set.append(Flag(spec, value))
+ else:
+ # Boolean flags.
+ if random.randint(0, 1):
+ flag_set.append(Flag(spec))
+
+ return set([Task(FlagSet(flag_set))])
+
+
+def _GenerateAllFlagsTasks(specs):
+ """Generate a task in which all the flags are enabled.
+
+ All the boolean flags in the specs will be enabled and all the numeric flags
+ will have the largest legal value.
+
+ Args:
+ specs: A list of spec from which the flag set is created.
+
+ Returns:
+ A set containing a task that has all flags enabled.
+ """
+
+ flag_set = []
+
+ for spec in specs:
+ result = flags.Search(spec)
+ value = (int(result.group('end')) - 1) if result else -1
+ flag_set.append(Flag(spec, value))
+
+ return set([Task(FlagSet(flag_set))])
+
+
+def _GenerateNoFlagTask():
+ return set([Task(FlagSet([]))])
+
+
+def _ComputeCost(cost_func, specs, flag_set):
+ """Compute the mock cost of the flag_set using the input cost function.
+
+ The cost is the value of the cost function string evaluated over the flag
+ values; a flag that is not enabled contributes 0.
+
+ Args:
+ cost_func: The cost function which is used to compute the mock cost of a
+ dictionary of flags.
+ specs: All the specs that are used in the algorithm. This is used to check
+ whether a certain flag is disabled in the flag_set dictionary.
+ flag_set: A dictionary of spec and flag pairs.
+
+ Returns:
+ The mock cost of the input dictionary of the flags.
+ """
+
+ values = []
+
+ for spec in specs:
+ # If a flag is enabled, its value is added. Otherwise a padding 0 is added.
+ values.append(flag_set[spec].GetValue() if spec in flag_set else 0)
+
+ # The cost function string can use the values array.
+ return eval(cost_func)
+
+
+def _GenerateTestFlags(num_flags, upper_bound, file_name):
+ """Generate a set of mock flags and write it to a configuration file.
+
+ Each mock flag is a numeric flag whose spec covers the range [1, upper_bound].
+
+ Args:
+ num_flags: Number of numeric flags to be generated.
+ upper_bound: The value of the upper bound of the range.
+ file_name: The configuration file name into which the mock flags are put.
+ """
+
+ with open(file_name, 'w') as output_file:
+ num_flags = int(num_flags)
+ upper_bound = int(upper_bound)
+ for i in range(num_flags):
+ output_file.write('%s=[1-%d]\n' % (i, upper_bound))
+
+
+def _TestAlgorithm(cost_func, specs, generations, best_result):
+ """Test the best result the algorithm should return.
+
+ Set up the framework, run the input algorithm and verify the result.
+
+ Args:
+ cost_func: The cost function which is used to compute the mock cost of a
+ dictionary of flags.
+ specs: All the specs that are used in the algorithm. This is used to check
+ whether certain flag is disabled in the flag_set dictionary.
+ generations: The initial generations to be evaluated.
+ best_result: The expected best result of the algorithm.
+ """
+
+ # Set up the utilities to test the framework.
+ manager = multiprocessing.Manager()
+ input_queue = manager.Queue()
+ output_queue = manager.Queue()
+ pp_steer = multiprocessing.Process(target=Steering,
+ args=(set(), generations, output_queue,
+ input_queue))
+ pp_steer.start()
+
+ # The best result of the algorithm so far.
+ result = sys.maxint
+
+ while True:
+ task = input_queue.get()
+
+ # POISONPILL signals the end of the algorithm.
+ if task == pipeline_process.POISONPILL:
+ break
+
+ task.SetResult(BUILD_STAGE, (0, 0, 0, 0, 0))
+
+ # Compute the mock cost for the task.
+ task_result = _ComputeCost(cost_func, specs, task.GetFlags())
+ task.SetResult(TEST_STAGE, task_result)
+
+ # If the mock result of the current task is the best so far, set this
+ # result to be the best result.
+ if task_result < result:
+ result = task_result
+
+ output_queue.put(task)
+
+ pp_steer.join()
+ assert best_result == result
+
+
+class FlagAlgorithms(unittest.TestCase):
+ """This class tests the hill climbing algorithms."""
+
+ def testBestHillClimb(self):
+ """Test the best branching hill climbing algorithm.
+
+ Verify that it finds the optimal flag set for simple mock cost functions.
+ """
+
+ # Initiate the build/test command and the log directory.
+ Task.InitLogCommand(None, None, 'output')
+
+ # Generate the testing specs.
+ mock_test_file = 'scale_mock_test'
+ _GenerateTestFlags(NUM_FLAGS, FLAG_RANGES, mock_test_file)
+ specs = flags.ReadConf(mock_test_file)
+
+ # Generate the initial generations for a test whose cost function is the
+ # summation of the values of all the flags.
+ generation_tasks = _GenerateAllFlagsTasks(specs)
+ generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]
+
+ # Test the algorithm. The cost function is the summation of all the values
+ # of all the flags. Therefore, the best value is supposed to be 0, i.e.,
+ # when all the flags are disabled.
+ _TestAlgorithm('sum(values[0:len(values)])', specs, generations, 0)
+
+ # This test uses a cost function that is the negative of the previous cost
+ # function. Therefore, the best result should be found in the task with all
+ # the flags enabled.
+ cost_function = '-sum(values[0:len(values)])'
+ all_flags = list(generation_tasks)[0].GetFlags()
+ cost = _ComputeCost(cost_function, specs, all_flags)
+
+ # Generate the initial generations.
+ generation_tasks = _GenerateNoFlagTask()
+ generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]
+
+ # Test the algorithm. The cost function is the negative of the summation of
+ # all the values of all the flags. Therefore, the best value is the cost of
+ # the task that has all the flags enabled.
+ _TestAlgorithm(cost_function, specs, generations, cost)
+
+
+if __name__ == '__main__':
+ unittest.main()
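
The mock cost in _ComputeCost is simply a Python expression evaluated over a local list named values, with disabled flags contributing 0; that is what lets the test pass cost-function strings such as 'sum(values[0:len(values)])'. A standalone illustration, with made-up values:

# Standalone illustration of the eval-based mock cost; the values are made up.
values = [3, 0, 7]   # enabled flags contribute their value, disabled ones 0
print(eval('sum(values[0:len(values)])'))    # prints 10
print(eval('-sum(values[0:len(values)])'))   # prints -10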