aboutsummaryrefslogtreecommitdiff
path: root/bestflags
diff options
context:
space:
mode:
authorYuheng Long <yuhenglong@google.com>2013-08-14 10:17:25 -0700
committerChromeBot <chrome-bot@google.com>2013-08-16 22:12:08 -0700
commitce860ea3fc65c488b9ea5e93efbf521dae9dc7dd (patch)
tree0b8a0793a9eacbba3caedb0f3c543e7ee338f618 /bestflags
parent25cdf79e17a73858ffa28db8a5b387210fea9e25 (diff)
downloadtoolchain-utils-ce860ea3fc65c488b9ea5e93efbf521dae9dc7dd.tar.gz
Fixed the problems in task and steering.
BUG=None TEST=unit tests for the pipeline stage, pipeline workers, generation, steering, task, flag, hill climbing and genetic algorithm. Change-Id: Ib34ef05518124b6ffa03f0c35795109e22581b89 Reviewed-on: https://gerrit-int.chromium.org/43002 Reviewed-by: Luis Lozano <llozano@chromium.org> Commit-Queue: Yuheng Long <yuhenglong@google.com> Tested-by: Yuheng Long <yuhenglong@google.com>
Diffstat (limited to 'bestflags')
-rw-r--r--bestflags/pipeline_process.py5
-rw-r--r--bestflags/pipeline_process_test.py7
-rw-r--r--bestflags/task.py125
-rw-r--r--bestflags/testing_batch.py18
4 files changed, 102 insertions, 53 deletions
diff --git a/bestflags/pipeline_process.py b/bestflags/pipeline_process.py
index 8c98fdb2..e77d92cc 100644
--- a/bestflags/pipeline_process.py
+++ b/bestflags/pipeline_process.py
@@ -91,7 +91,7 @@ class PipelineProcess(multiprocessing.Process):
# the helper process
helper_process = multiprocessing.Process(target=self._helper,
- args=(self._cache,
+ args=(self._stage, self._cache,
self._helper_queue,
self._work_queue,
self._result_queue))
@@ -112,7 +112,8 @@ class PipelineProcess(multiprocessing.Process):
self._helper_queue.put(task)
else:
# Let the workers do the actual work.
- work_pool.apply_async(self._worker, args=(task, self._work_queue,
+ work_pool.apply_async(self._worker, args=(self._stage, task,
+ self._work_queue,
self._result_queue))
mycache.append(task_key)
diff --git a/bestflags/pipeline_process_test.py b/bestflags/pipeline_process_test.py
index 989fd742..77d72db5 100644
--- a/bestflags/pipeline_process_test.py
+++ b/bestflags/pipeline_process_test.py
@@ -21,8 +21,10 @@ ERROR = -334
TEST_STAGE = -8
-def MockHelper(done_dict, helper_queue, _, result_queue):
+def MockHelper(stage, done_dict, helper_queue, _, result_queue):
"""This method echos input to the output."""
+
+ assert stage == TEST_STAGE
while True:
if not helper_queue.empty():
task = helper_queue.get()
@@ -37,7 +39,8 @@ def MockHelper(done_dict, helper_queue, _, result_queue):
result_queue.put(('helper', task.GetIdentifier(TEST_STAGE)))
-def MockWorker(task, _, result_queue):
+def MockWorker(stage, task, _, result_queue):
+ assert stage == TEST_STAGE
result_queue.put(('worker', task.GetIdentifier(TEST_STAGE)))
diff --git a/bestflags/task.py b/bestflags/task.py
index e41b0aea..ee85b1a4 100644
--- a/bestflags/task.py
+++ b/bestflags/task.py
@@ -203,7 +203,7 @@ class Task(object):
"""
# Define the dictionary for different stage function lookup.
- work_functions = {BUILD_STAGE: self.__Compile(), TEST_STAGE: self.__Test()}
+ work_functions = {BUILD_STAGE: self.__Compile, TEST_STAGE: self.__Test}
assert stage in work_functions
@@ -253,35 +253,43 @@ class Task(object):
command = '%s %s %s' % (Task.BUILD_COMMAND, ' '.join(flags),
self._task_identifier)
- # Try build_tries number of times before confirming that the build fails.
+ # Try BUILD_TRIES number of times before confirming that the build fails.
for _ in range(BUILD_TRIES):
- # Execute the command and get the execution status/results.
- p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- (out, err) = p.communicate()
-
- if not err and out != ERROR_STRING:
- # Each build results contains the checksum of the result image, the
- # performance cost of the build, the compilation image, the length of
- # the build, and the length of the text section of the build.
- (checksum, cost, image, file_length, text_length) = out.split()
- # Build successfully.
- break
-
- # Build failed.
- cost = ERROR_STRING
+ try:
+ # Execute the command and get the execution status/results.
+ p = subprocess.Popen(command.split(), stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (out, err) = p.communicate()
+
+ if out:
+ out = out.strip()
+ if out != ERROR_STRING:
+          # Each build result contains the checksum of the result image, the
+ # performance cost of the build, the compilation image, the length
+ # of the build, and the length of the text section of the build.
+ (checksum, cost, image, file_length, text_length) = out.split()
+ # Build successfully.
+ break
+
+ # Build failed.
+ cost = ERROR_STRING
+ except _:
+ # If there is exception getting the cost information of the build, the
+ # build failed.
+ cost = ERROR_STRING
# Convert the build cost from String to integer. The build cost is used to
# compare a task with another task. Set the build cost of the failing task
- # to the max integer.
- self._build_cost = sys.maxint if cost == ERROR_STRING else int(cost)
+ # to the max integer. The for loop will keep trying until either there is a
+ # success or BUILD_TRIES number of tries have been conducted.
+ self._build_cost = sys.maxint if cost == ERROR_STRING else float(cost)
self._checksum = checksum
self._file_length = file_length
self._text_length = text_length
self._image = image
- self.__LogBuildCost()
+ self.__LogBuildCost(err)
def __Test(self):
"""__Test the task against benchmark(s) using the input test command."""
@@ -300,24 +308,32 @@ class Task(object):
command = '%s %s %s' % (Task.TEST_COMMAND, self._image,
self._task_identifier)
- # Try build_tries number of times before confirming that the build fails.
+    # Try TEST_TRIES number of times before confirming that the test fails.
for _ in range(TEST_TRIES):
- p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- (out, err) = p.communicate()
-
- if not err and out != ERROR_STRING:
- # The test results contains the performance cost of the test.
- cost = out
- # Test successfully.
- break
-
- # Test failed.
- cost = ERROR_STRING
-
- self._exe_cost = sys.maxint if (cost == ERROR_STRING) else int(cost)
-
- self.__LogTestCost()
+ try:
+ p = subprocess.Popen(command.split(), stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (out, err) = p.communicate()
+
+ if out:
+ out = out.strip()
+ if out != ERROR_STRING:
+          # The test results contain the performance cost of the test.
+ cost = out
+ # Test successfully.
+ break
+
+ # Test failed.
+ cost = ERROR_STRING
+ except _:
+ # If there is exception getting the cost information of the test, the
+ # test failed. The for loop will keep trying until either there is a
+ # success or TEST_TRIES number of tries have been conducted.
+ cost = ERROR_STRING
+
+ self._exe_cost = sys.maxint if (cost == ERROR_STRING) else float(cost)
+
+ self.__LogTestCost(err)
def __SetBuildResult(self, (checksum, build_cost, image, file_length,
text_length)):
@@ -357,33 +373,47 @@ class Task(object):
# Write out the result in the comma-separated format (CSV).
out_file.write('%s,%s,%s,%s,%s,%s,%s\n' % steering_result)
- def __LogBuildCost(self):
+ def __LogBuildCost(self, log):
"""Log the build results for the task.
The build results include the compilation time of the build, the result
image, the checksum, the file length and the text length of the image.
The file length of the image includes the length of the file of the image.
The text length only includes the length of the text section of the image.
+
+ Args:
+ log: The build log of this task.
"""
- build_log = '%s/%s/build.txt' % self._log_path
+ build_result_log = '%s/%s/build.txt' % self._log_path
- _CreateDirectory(build_log)
+ _CreateDirectory(build_result_log)
- with open(build_log, 'w') as out_file:
+ with open(build_result_log, 'w') as out_file:
build_result = (self._flag_set, self._build_cost, self._image,
self._checksum, self._file_length, self._text_length)
# Write out the result in the comma-separated format (CSV).
out_file.write('%s,%s,%s,%s,%s,%s\n' % build_result)
- def __LogTestCost(self):
+ # The build information about running the build.
+ build_run_log = '%s/%s/build_log.txt' % self._log_path
+ _CreateDirectory(build_run_log)
+
+ with open(build_run_log, 'w') as out_log_file:
+ # Write out the execution information.
+ out_log_file.write('%s' % log)
+
+ def __LogTestCost(self, log):
"""Log the test results for the task.
The test results include the runtime execution time of the test.
+
+ Args:
+ log: The test log of this task.
"""
- test_log = '%s/%s/build.txt' % self._log_path
+ test_log = '%s/%s/test.txt' % self._log_path
_CreateDirectory(test_log)
@@ -393,6 +423,15 @@ class Task(object):
# Write out the result in the comma-separated format (CSV).
out_file.write('%s,%s,%s\n' % test_result)
+ # The execution information about running the test.
+ test_run_log = '%s/%s/test_log.txt' % self._log_path
+
+ _CreateDirectory(test_run_log)
+
+ with open(test_run_log, 'w') as out_log_file:
+ # Append the test log information.
+ out_log_file.write('%s' % log)
+
def IsImproved(self, other):
"""Compare the current task with another task.
diff --git a/bestflags/testing_batch.py b/bestflags/testing_batch.py
index f44c4f66..97ea450a 100644
--- a/bestflags/testing_batch.py
+++ b/bestflags/testing_batch.py
@@ -104,7 +104,7 @@ def _GenerateNoFlagTask():
return set([Task(FlagSet([]))])
-def _GenerateRandomGATasks(specs, num_tasks, num_trials):
+def GenerateRandomGATasks(specs, num_tasks, num_trials):
"""Generate a set of tasks for the Genetic Algorithm.
Args:
@@ -233,8 +233,14 @@ def _TestAlgorithm(cost_func, specs, generations, best_result):
assert best_result == result
-class FlagAlgorithms(unittest.TestCase):
- """This class test the FlagSet class."""
+class MockAlgorithmsTest(unittest.TestCase):
+ """This class mock tests different steering algorithms.
+
+ The steering algorithms are responsible for generating the next set of tasks
+ to run in each iteration. This class does a functional testing on the
+ algorithms. It mocks out the computation of the fitness function from the
+ build and test phases by letting the user define the fitness function.
+ """
def testBestHillClimb(self):
"""Test the best hill climb algorithm.
@@ -279,7 +285,7 @@ class FlagAlgorithms(unittest.TestCase):
def testGeneticAlgorithm(self):
"""Test the Genetic Algorithm.
- Do a function testing here and see how well it scales.
+ Do a functional testing here and see how well it scales.
"""
# Initiate the build/test command and the log directory.
@@ -295,8 +301,8 @@ class FlagAlgorithms(unittest.TestCase):
specs, MUTATION_RATE)
# Generate the initial generations.
- generation_tasks = _GenerateRandomGATasks(specs, NUM_CHROMOSOMES,
- NUM_TRIALS)
+ generation_tasks = GenerateRandomGATasks(specs, NUM_CHROMOSOMES,
+ NUM_TRIALS)
generations = [GAGeneration(generation_tasks, set([]), 0)]
# Test the algorithm.