aboutsummaryrefslogtreecommitdiff
path: root/catapult/common/py_utils/py_utils
diff options
context:
space:
mode:
Diffstat (limited to 'catapult/common/py_utils/py_utils')
-rw-r--r--catapult/common/py_utils/py_utils/__init__.py16
-rw-r--r--catapult/common/py_utils/py_utils/atexit_with_log.py21
-rw-r--r--catapult/common/py_utils/py_utils/camel_case.py30
-rw-r--r--catapult/common/py_utils/py_utils/camel_case_unittest.py50
-rw-r--r--catapult/common/py_utils/py_utils/chrome_binaries.json58
-rw-r--r--catapult/common/py_utils/py_utils/class_util.py26
-rw-r--r--catapult/common/py_utils/py_utils/class_util_unittest.py138
-rw-r--r--catapult/common/py_utils/py_utils/cloud_storage.py36
-rw-r--r--catapult/common/py_utils/py_utils/cloud_storage_unittest.py228
-rw-r--r--catapult/common/py_utils/py_utils/discover.py191
-rw-r--r--catapult/common/py_utils/py_utils/discover_unittest.py146
-rw-r--r--catapult/common/py_utils/py_utils/expectations_parser.py124
-rw-r--r--catapult/common/py_utils/py_utils/expectations_parser_unittest.py165
-rw-r--r--catapult/common/py_utils/py_utils/logging_util.py35
-rw-r--r--catapult/common/py_utils/py_utils/logging_util_unittest.py23
-rwxr-xr-xcatapult/common/py_utils/py_utils/memory_debug.py83
-rw-r--r--catapult/common/py_utils/py_utils/py_utils_unittest.py1
-rw-r--r--catapult/common/py_utils/py_utils/retry_util.py57
-rw-r--r--catapult/common/py_utils/py_utils/retry_util_unittest.py118
-rw-r--r--catapult/common/py_utils/py_utils/slots_metaclass.py27
-rw-r--r--catapult/common/py_utils/py_utils/slots_metaclass_unittest.py41
-rw-r--r--catapult/common/py_utils/py_utils/tempfile_ext.py5
-rw-r--r--catapult/common/py_utils/py_utils/test_data/discoverable_classes/__init__.py3
-rw-r--r--catapult/common/py_utils/py_utils/test_data/discoverable_classes/another_discover_dummyclass.py33
-rw-r--r--catapult/common/py_utils/py_utils/test_data/discoverable_classes/discover_dummyclass.py9
-rw-r--r--catapult/common/py_utils/py_utils/test_data/discoverable_classes/parameter_discover_dummyclass.py11
26 files changed, 1577 insertions, 98 deletions
diff --git a/catapult/common/py_utils/py_utils/__init__.py b/catapult/common/py_utils/py_utils/__init__.py
index 3f263119..fba0897c 100644
--- a/catapult/common/py_utils/py_utils/__init__.py
+++ b/catapult/common/py_utils/py_utils/__init__.py
@@ -9,6 +9,7 @@ import inspect
import os
import sys
import time
+import platform
def GetCatapultDir():
@@ -27,6 +28,21 @@ def IsRunningOnCrosDevice():
return False
+def GetHostOsName():
+ if IsRunningOnCrosDevice():
+ return 'chromeos'
+ elif sys.platform.startswith('linux'):
+ return 'linux'
+ elif sys.platform == 'darwin':
+ return 'mac'
+ elif sys.platform == 'win32':
+ return 'win'
+
+
+def GetHostArchName():
+ return platform.machine()
+
+
def _ExecutableExtensions():
# pathext is, e.g. '.com;.exe;.bat;.cmd'
exts = os.getenv('PATHEXT').split(';') #e.g. ['.com','.exe','.bat','.cmd']
diff --git a/catapult/common/py_utils/py_utils/atexit_with_log.py b/catapult/common/py_utils/py_utils/atexit_with_log.py
new file mode 100644
index 00000000..f217c094
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/atexit_with_log.py
@@ -0,0 +1,21 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import atexit
+import logging
+
+
+def _WrapFunction(function):
+ def _WrappedFn(*args, **kwargs):
+ logging.debug('Try running %s', repr(function))
+ try:
+ function(*args, **kwargs)
+ logging.debug('Did run %s', repr(function))
+ except Exception: # pylint: disable=broad-except
+ logging.exception('Exception running %s', repr(function))
+ return _WrappedFn
+
+
+def Register(function, *args, **kwargs):
+ atexit.register(_WrapFunction(function), *args, **kwargs)
diff --git a/catapult/common/py_utils/py_utils/camel_case.py b/catapult/common/py_utils/py_utils/camel_case.py
new file mode 100644
index 00000000..9a768902
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/camel_case.py
@@ -0,0 +1,30 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+
+def ToUnderscore(obj):
+ """Converts a string, list, or dict from camelCase to lower_with_underscores.
+
+ Descends recursively into lists and dicts, converting all dict keys.
+ Returns a newly allocated object of the same structure as the input.
+ """
+ if isinstance(obj, basestring):
+ return re.sub('(?!^)([A-Z]+)', r'_\1', obj).lower()
+
+ elif isinstance(obj, list):
+ return [ToUnderscore(item) for item in obj]
+
+ elif isinstance(obj, dict):
+ output = {}
+ for k, v in obj.iteritems():
+ if isinstance(v, list) or isinstance(v, dict):
+ output[ToUnderscore(k)] = ToUnderscore(v)
+ else:
+ output[ToUnderscore(k)] = v
+ return output
+
+ else:
+ return obj
diff --git a/catapult/common/py_utils/py_utils/camel_case_unittest.py b/catapult/common/py_utils/py_utils/camel_case_unittest.py
new file mode 100644
index 00000000..c748ba2f
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/camel_case_unittest.py
@@ -0,0 +1,50 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from py_utils import camel_case
+
+
+class CamelCaseTest(unittest.TestCase):
+
+ def testString(self):
+ self.assertEqual(camel_case.ToUnderscore('camelCase'), 'camel_case')
+ self.assertEqual(camel_case.ToUnderscore('CamelCase'), 'camel_case')
+ self.assertEqual(camel_case.ToUnderscore('Camel2Case'), 'camel2_case')
+ self.assertEqual(camel_case.ToUnderscore('Camel2Case2'), 'camel2_case2')
+ self.assertEqual(camel_case.ToUnderscore('2012Q3'), '2012_q3')
+
+ def testList(self):
+ camel_case_list = ['CamelCase', ['NestedList']]
+ underscore_list = ['camel_case', ['nested_list']]
+ self.assertEqual(camel_case.ToUnderscore(camel_case_list), underscore_list)
+
+ def testDict(self):
+ camel_case_dict = {
+ 'gpu': {
+ 'vendorId': 1000,
+ 'deviceId': 2000,
+ 'vendorString': 'aString',
+ 'deviceString': 'bString'},
+ 'secondaryGpus': [
+ {'vendorId': 3000, 'deviceId': 4000,
+ 'vendorString': 'k', 'deviceString': 'l'}
+ ]
+ }
+ underscore_dict = {
+ 'gpu': {
+ 'vendor_id': 1000,
+ 'device_id': 2000,
+ 'vendor_string': 'aString',
+ 'device_string': 'bString'},
+ 'secondary_gpus': [
+ {'vendor_id': 3000, 'device_id': 4000,
+ 'vendor_string': 'k', 'device_string': 'l'}
+ ]
+ }
+ self.assertEqual(camel_case.ToUnderscore(camel_case_dict), underscore_dict)
+
+ def testOther(self):
+ self.assertEqual(camel_case.ToUnderscore(self), self)
diff --git a/catapult/common/py_utils/py_utils/chrome_binaries.json b/catapult/common/py_utils/py_utils/chrome_binaries.json
index bb0b2e10..ce357c7e 100644
--- a/catapult/common/py_utils/py_utils/chrome_binaries.json
+++ b/catapult/common/py_utils/py_utils/chrome_binaries.json
@@ -6,22 +6,22 @@
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"mac_x86_64": {
- "cloud_storage_hash": "ab33866d00fb0c9d6543c20a21da5f047ba6a7b6",
+ "cloud_storage_hash": "b321a01b2c98fe62b1876655b10436c2226b1b76",
"download_path": "bin/reference_builds/chrome-mac64.zip",
"path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome",
- "version_in_cs": "58.0.3004.0"
+ "version_in_cs": "62.0.3194.0"
},
"win_AMD64": {
- "cloud_storage_hash": "2348f9bcf421fa4739493a12b4c8e3210a528d84",
+ "cloud_storage_hash": "2da1c7861745ab0e8f666f119eeb58c1410710cc",
"download_path": "bin\\reference_build\\chrome-win64-pgo.zip",
"path_within_archive": "chrome-win64-pgo\\chrome.exe",
- "version_in_cs": "58.0.3004.0"
+ "version_in_cs": "62.0.3194.0"
},
"win_x86": {
- "cloud_storage_hash": "421c59cfbc02bee9b74f68869af3a930b5988e71",
+ "cloud_storage_hash": "270abd11621386be612af02b707844cba06c0dbd",
"download_path": "bin\\reference_build\\chrome-win32-pgo.zip",
"path_within_archive": "chrome-win32-pgo\\chrome.exe",
- "version_in_cs": "58.0.3004.0"
+ "version_in_cs": "62.0.3194.0"
}
}
},
@@ -30,10 +30,10 @@
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"linux_x86_64": {
- "cloud_storage_hash": "d61c3c5a81dc5e0a896589ab3d72a253e4b9cc90",
+ "cloud_storage_hash": "2592ec6f8dd56227c3c281e3cccecd6c9ba72cad",
"download_path": "bin/reference_build/chrome-linux64.zip",
- "path_within_archive": "chrome-precise64/chrome",
- "version_in_cs": "58.0.3000.4"
+ "path_within_archive": "chrome-linux64/chrome",
+ "version_in_cs": "62.0.3192.0"
}
}
},
@@ -42,45 +42,45 @@
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"android_k_armeabi-v7a": {
- "cloud_storage_hash": "2f0629a395974793c3a25e6c2dc971487742bd49",
+ "cloud_storage_hash": "948c776335d3a38d6e8de0dba576e109c6b5724c",
"download_path": "bin/reference_build/android_k_armeabi-v7a/ChromeStable.apk",
- "version_in_cs": "56.0.2924.87"
+ "version_in_cs": "63.0.3239.111"
},
"android_l_arm64-v8a": {
- "cloud_storage_hash": "03306b04e49ed3b0c4c29da84a128d76659624f2",
+ "cloud_storage_hash": "a25663aad7397002f6dfe44fb97087fdd77df119",
"download_path": "bin/reference_build/android_l_arm64-v8a/ChromeStable.apk",
- "version_in_cs": "56.0.2924.87"
+ "version_in_cs": "63.0.3239.111"
},
"android_l_armeabi-v7a": {
- "cloud_storage_hash": "2f0629a395974793c3a25e6c2dc971487742bd49",
+ "cloud_storage_hash": "948c776335d3a38d6e8de0dba576e109c6b5724c",
"download_path": "bin/reference_build/android_l_armeabi-v7a/ChromeStable.apk",
- "version_in_cs": "56.0.2924.87"
+ "version_in_cs": "63.0.3239.111"
},
"linux_x86_64": {
- "cloud_storage_hash": "07dd594d89c8350978ed368b55204d7ee3641001",
+ "cloud_storage_hash": "b0506e43d268eadb887ccc847695674f9d2e51a5",
"download_path": "bin/reference_build/chrome-linux64.zip",
- "path_within_archive": "chrome-precise64/chrome",
- "version_in_cs": "56.0.2924.87"
+ "path_within_archive": "chrome-linux64/chrome",
+ "version_in_cs": "63.0.3239.108"
},
"mac_x86_64": {
- "cloud_storage_hash": "e2e1ac31913ab6976084375540e17bbaa2401820",
+ "cloud_storage_hash": "56a3de45b37b7eb563006c30a548a48928cffb39",
"download_path": "bin/reference_builds/chrome-mac64.zip",
"path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome",
- "version_in_cs": "56.0.2924.87"
+ "version_in_cs": "63.0.3239.108"
},
"win_AMD64": {
- "cloud_storage_hash": "d6624303bceff81503db78c4ad17d1ebd1af7b68",
- "download_path": "bin\\reference_build\\chrome-win64-pgo.zip",
- "path_within_archive": "chrome-win64-pgo\\chrome.exe",
- "version_in_cs": "56.0.2924.87"
+ "cloud_storage_hash": "d1511334055c88fd9fa5e6e63fee666d9be8c433",
+ "download_path": "bin\\reference_build\\chrome-win64.zip",
+ "path_within_archive": "chrome-win64\\chrome.exe",
+ "version_in_cs": "63.0.3239.108"
},
"win_x86": {
- "cloud_storage_hash": "2b89d4571eeac4d80d4a1fac7c92db3f7e0bd565",
- "download_path": "bin\\reference_build\\chrome-win32-pgo.zip",
- "path_within_archive": "chrome-win32-pgo\\chrome.exe",
- "version_in_cs": "56.0.2924.87"
+ "cloud_storage_hash": "9e869b3b25ee7b682712cde6eaddc2d7fa84cc90",
+ "download_path": "bin\\reference_build\\chrome-win32.zip",
+ "path_within_archive": "chrome-win32\\chrome.exe",
+ "version_in_cs": "63.0.3239.108"
}
}
}
}
-} \ No newline at end of file
+}
diff --git a/catapult/common/py_utils/py_utils/class_util.py b/catapult/common/py_utils/py_utils/class_util.py
new file mode 100644
index 00000000..4cec4300
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/class_util.py
@@ -0,0 +1,26 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import inspect
+
+def IsMethodOverridden(parent_cls, child_cls, method_name):
+ assert inspect.isclass(parent_cls), '%s should be a class' % parent_cls
+ assert inspect.isclass(child_cls), '%s should be a class' % child_cls
+ assert parent_cls.__dict__.get(method_name), '%s has no method %s' % (
+ parent_cls, method_name)
+
+ if child_cls.__dict__.get(method_name):
+ # It's overridden
+ return True
+
+ if parent_cls in child_cls.__bases__:
+ # The parent is the base class of the child, we did not find the
+ # overridden method.
+ return False
+
+ # For all the base classes of this class that are not object, check if
+ # they override the method.
+ base_cls = [cls for cls in child_cls.__bases__ if cls and cls != object]
+ return any(
+ IsMethodOverridden(parent_cls, base, method_name) for base in base_cls)
diff --git a/catapult/common/py_utils/py_utils/class_util_unittest.py b/catapult/common/py_utils/py_utils/class_util_unittest.py
new file mode 100644
index 00000000..938bcdc7
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/class_util_unittest.py
@@ -0,0 +1,138 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from py_utils import class_util
+
+
+class ClassUtilTest(unittest.TestCase):
+
+ def testClassOverridden(self):
+ class Parent(object):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ class Child(Parent):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ self.assertTrue(class_util.IsMethodOverridden(
+ Parent, Child, 'MethodShouldBeOverridden'))
+
+ def testGrandchildOverridden(self):
+ class Parent(object):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ class Child(Parent):
+ pass
+
+ class Grandchild(Child):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ self.assertTrue(class_util.IsMethodOverridden(
+ Parent, Grandchild, 'MethodShouldBeOverridden'))
+
+ def testClassNotOverridden(self):
+ class Parent(object):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ class Child(Parent):
+ def SomeOtherMethod(self):
+ pass
+
+ self.assertFalse(class_util.IsMethodOverridden(
+ Parent, Child, 'MethodShouldBeOverridden'))
+
+ def testGrandchildNotOverridden(self):
+ class Parent(object):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ class Child(Parent):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ class Grandchild(Child):
+ def SomeOtherMethod(self):
+ pass
+
+ self.assertTrue(class_util.IsMethodOverridden(
+ Parent, Grandchild, 'MethodShouldBeOverridden'))
+
+ def testClassNotPresentInParent(self):
+ class Parent(object):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ class Child(Parent):
+ def MethodShouldBeOverridden(self):
+ pass
+
+ self.assertRaises(
+ AssertionError, class_util.IsMethodOverridden,
+ Parent, Child, 'WrongMethod')
+
+ def testInvalidClass(self):
+ class Foo(object):
+ def Bar(self):
+ pass
+
+ self.assertRaises(
+ AssertionError, class_util.IsMethodOverridden, 'invalid', Foo, 'Bar')
+
+ self.assertRaises(
+ AssertionError, class_util.IsMethodOverridden, Foo, 'invalid', 'Bar')
+
+ def testMultipleInheritance(self):
+ class Aaa(object):
+ def One(self):
+ pass
+
+ class Bbb(object):
+ def Two(self):
+ pass
+
+ class Ccc(Aaa, Bbb):
+ pass
+
+ class Ddd(object):
+ def Three(self):
+ pass
+
+ class Eee(Ddd):
+ def Three(self):
+ pass
+
+ class Fff(Ccc, Eee):
+ def One(self):
+ pass
+
+ class Ggg(object):
+ def Four(self):
+ pass
+
+ class Hhh(Fff, Ggg):
+ def Two(self):
+ pass
+
+ class Iii(Hhh):
+ pass
+
+ class Jjj(Iii):
+ pass
+
+ self.assertFalse(class_util.IsMethodOverridden(Aaa, Ccc, 'One'))
+ self.assertTrue(class_util.IsMethodOverridden(Aaa, Fff, 'One'))
+ self.assertTrue(class_util.IsMethodOverridden(Aaa, Hhh, 'One'))
+ self.assertTrue(class_util.IsMethodOverridden(Aaa, Jjj, 'One'))
+ self.assertFalse(class_util.IsMethodOverridden(Bbb, Ccc, 'Two'))
+ self.assertTrue(class_util.IsMethodOverridden(Bbb, Hhh, 'Two'))
+ self.assertTrue(class_util.IsMethodOverridden(Bbb, Jjj, 'Two'))
+ self.assertFalse(class_util.IsMethodOverridden(Eee, Fff, 'Three'))
+
+
diff --git a/catapult/common/py_utils/py_utils/cloud_storage.py b/catapult/common/py_utils/py_utils/cloud_storage.py
index 7bc9a197..f6013806 100644
--- a/catapult/common/py_utils/py_utils/cloud_storage.py
+++ b/catapult/common/py_utils/py_utils/cloud_storage.py
@@ -15,6 +15,7 @@ import subprocess
import re
import sys
import tempfile
+import time
import py_utils
from py_utils import lock
@@ -412,14 +413,49 @@ def GetIfChanged(file_path, bucket):
"""
with _FileLock(file_path):
hash_path = file_path + '.sha1'
+ fetch_ts_path = file_path + '.fetchts'
if not os.path.exists(hash_path):
logger.warning('Hash file not found: %s', hash_path)
return False
expected_hash = ReadHash(hash_path)
+
+ # To save the time required to compute the binary hash (which is an
+ # expensive operation, see crbug.com/793609#c2 for details), any time we
+ # fetch a new binary, we save not only that binary but also the time of
+ # the fetch in |fetch_ts_path|. Anytime the file needs to be updated (its
+ # hash in |hash_path| changes), we just need to compare the timestamp of
+ # |hash_path| with the timestamp in |fetch_ts_path| to figure out
+ #
+ # Notes: for this to work, we make the assumption that only
+ # cloud_storage.GetIfChanged modifies the local |file_path| binary.
+
+ if os.path.exists(fetch_ts_path) and os.path.exists(file_path):
+ with open(fetch_ts_path) as f:
+ data = f.read().strip()
+ last_binary_fetch_ts = float(data)
+
+ if last_binary_fetch_ts > os.path.getmtime(hash_path):
+ return False
+
+ # Whether the locally stored binary's hash already matches
+ # expected_hash or we need to fetch a new binary from the cloud, update
+ # the timestamp in |fetch_ts_path| with the current time anyway, since
+ # it is outdated compared with the sha1 file's last modified time.
+ with open(fetch_ts_path, 'w') as f:
+ f.write(str(time.time()))
+
if os.path.exists(file_path) and CalculateHash(file_path) == expected_hash:
return False
_GetLocked(bucket, expected_hash, file_path)
+ if CalculateHash(file_path) != expected_hash:
+ os.remove(fetch_ts_path)
+ raise RuntimeError(
+ 'Binary stored in cloud storage does not have hash matching .sha1 '
+ 'file. Please make sure that the binary file is uploaded using '
+ 'depot_tools/upload_to_google_storage.py script or through automatic '
+ 'framework.')
return True
diff --git a/catapult/common/py_utils/py_utils/cloud_storage_unittest.py b/catapult/common/py_utils/py_utils/cloud_storage_unittest.py
index a513b262..ae2f7482 100644
--- a/catapult/common/py_utils/py_utils/cloud_storage_unittest.py
+++ b/catapult/common/py_utils/py_utils/cloud_storage_unittest.py
@@ -30,7 +30,7 @@ def _FakeCalulateHashNewHash(_):
return 'omgnewhash'
-class CloudStorageFakeFsUnitTest(fake_filesystem_unittest.TestCase):
+class BaseFakeFsUnitTest(fake_filesystem_unittest.TestCase):
def setUp(self):
self.original_environ = os.environ.copy()
@@ -54,6 +54,9 @@ class CloudStorageFakeFsUnitTest(fake_filesystem_unittest.TestCase):
def _FakeGet(self, bucket, remote_path, local_path):
pass
+
+class CloudStorageFakeFsUnitTest(BaseFakeFsUnitTest):
+
def _AssertRunCommandRaisesError(self, communicate_strs, error):
with mock.patch('py_utils.cloud_storage.subprocess.Popen') as popen:
p_mock = mock.Mock()
@@ -110,73 +113,6 @@ class CloudStorageFakeFsUnitTest(fake_filesystem_unittest.TestCase):
self.assertFalse(cloud_storage.Exists('fake bucket',
'fake remote path'))
- @mock.patch('py_utils.cloud_storage.CalculateHash')
- @mock.patch('py_utils.cloud_storage._GetLocked')
- @mock.patch('py_utils.cloud_storage._FileLock')
- @mock.patch('py_utils.cloud_storage.os.path')
- def testGetIfHashChanged(self, path_mock, unused_lock_mock, get_mock,
- calc_hash_mock):
- path_mock.exists.side_effect = [False, True, True]
- calc_hash_mock.return_value = 'hash'
-
- # The file at |local_path| doesn't exist. We should download file from cs.
- ret = cloud_storage.GetIfHashChanged(
- 'remote_path', 'local_path', 'cs_bucket', 'hash')
- self.assertTrue(ret)
- get_mock.assert_called_once_with('cs_bucket', 'remote_path', 'local_path')
- get_mock.reset_mock()
- self.assertFalse(calc_hash_mock.call_args)
- calc_hash_mock.reset_mock()
-
- # A local file exists at |local_path| but has the wrong hash.
- # We should download file from cs.
- ret = cloud_storage.GetIfHashChanged(
- 'remote_path', 'local_path', 'cs_bucket', 'new_hash')
- self.assertTrue(ret)
- get_mock.assert_called_once_with('cs_bucket', 'remote_path', 'local_path')
- get_mock.reset_mock()
- calc_hash_mock.assert_called_once_with('local_path')
- calc_hash_mock.reset_mock()
-
- # Downloaded file exists locally and has the right hash. Don't download.
- ret = cloud_storage.GetIfHashChanged(
- 'remote_path', 'local_path', 'cs_bucket', 'hash')
- self.assertFalse(get_mock.call_args)
- self.assertFalse(ret)
- calc_hash_mock.reset_mock()
- get_mock.reset_mock()
-
- @mock.patch('py_utils.cloud_storage._FileLock')
- def testGetIfChanged(self, unused_lock_mock):
- orig_get = cloud_storage._GetLocked
- orig_read_hash = cloud_storage.ReadHash
- orig_calculate_hash = cloud_storage.CalculateHash
- cloud_storage.ReadHash = _FakeReadHash
- cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
- file_path = 'test-file-path.wpr'
- hash_path = file_path + '.sha1'
- try:
- cloud_storage._GetLocked = self._FakeGet
- # hash_path doesn't exist.
- self.assertFalse(cloud_storage.GetIfChanged(file_path,
- cloud_storage.PUBLIC_BUCKET))
- # hash_path exists, but file_path doesn't.
- self.CreateFiles([hash_path])
- self.assertTrue(cloud_storage.GetIfChanged(file_path,
- cloud_storage.PUBLIC_BUCKET))
- # hash_path and file_path exist, and have same hash.
- self.CreateFiles([file_path])
- self.assertFalse(cloud_storage.GetIfChanged(file_path,
- cloud_storage.PUBLIC_BUCKET))
- # hash_path and file_path exist, and have different hashes.
- cloud_storage.CalculateHash = _FakeCalulateHashNewHash
- self.assertTrue(cloud_storage.GetIfChanged(file_path,
- cloud_storage.PUBLIC_BUCKET))
- finally:
- cloud_storage._GetLocked = orig_get
- cloud_storage.CalculateHash = orig_calculate_hash
- cloud_storage.ReadHash = orig_read_hash
-
@unittest.skipIf(sys.platform.startswith('win'),
'https://github.com/catapult-project/catapult/issues/1861')
def testGetFilesInDirectoryIfChanged(self):
@@ -218,7 +154,6 @@ class CloudStorageFakeFsUnitTest(fake_filesystem_unittest.TestCase):
finally:
cloud_storage._RunCommand = orig_run_command
-
@mock.patch('py_utils.cloud_storage._FileLock')
def testDisableCloudStorageIo(self, unused_lock_mock):
os.environ['DISABLE_CLOUD_STORAGE_IO'] = '1'
@@ -226,6 +161,10 @@ class CloudStorageFakeFsUnitTest(fake_filesystem_unittest.TestCase):
self.fs.CreateDirectory(dir_path)
file_path = os.path.join(dir_path, 'file1')
file_path_sha = file_path + '.sha1'
+
+ def CleanTimeStampFile():
+ os.remove(file_path + '.fetchts')
+
self.CreateFiles([file_path, file_path_sha])
with open(file_path_sha, 'w') as f:
f.write('hash1234')
@@ -239,10 +178,161 @@ class CloudStorageFakeFsUnitTest(fake_filesystem_unittest.TestCase):
cloud_storage.GetIfHashChanged('bar', file_path, 'bucket', 'hash1234')
with self.assertRaises(cloud_storage.CloudStorageIODisabled):
cloud_storage.Insert('bucket', 'foo', file_path)
+
+ CleanTimeStampFile()
with self.assertRaises(cloud_storage.CloudStorageIODisabled):
cloud_storage.GetFilesInDirectoryIfChanged(dir_path, 'bucket')
+class GetIfChangedTests(BaseFakeFsUnitTest):
+
+ def setUp(self):
+ super(GetIfChangedTests, self).setUp()
+ self._orig_read_hash = cloud_storage.ReadHash
+ self._orig_calculate_hash = cloud_storage.CalculateHash
+
+ def tearDown(self):
+ super(GetIfChangedTests, self).tearDown()
+ cloud_storage.CalculateHash = self._orig_calculate_hash
+ cloud_storage.ReadHash = self._orig_read_hash
+
+ @mock.patch('py_utils.cloud_storage._FileLock')
+ @mock.patch('py_utils.cloud_storage._GetLocked')
+ def testHashPathDoesNotExists(self, unused_get_locked, unused_lock_mock):
+ cloud_storage.ReadHash = _FakeReadHash
+ cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
+ file_path = 'test-file-path.wpr'
+
+ cloud_storage._GetLocked = self._FakeGet
+ # hash_path doesn't exist.
+ self.assertFalse(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+
+ @mock.patch('py_utils.cloud_storage._FileLock')
+ @mock.patch('py_utils.cloud_storage._GetLocked')
+ def testHashPathExistsButFilePathDoesNot(
+ self, unused_get_locked, unused_lock_mock):
+ cloud_storage.ReadHash = _FakeReadHash
+ cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
+ file_path = 'test-file-path.wpr'
+ hash_path = file_path + '.sha1'
+
+ # hash_path exists, but file_path doesn't.
+ self.CreateFiles([hash_path])
+ self.assertTrue(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+
+ @mock.patch('py_utils.cloud_storage._FileLock')
+ @mock.patch('py_utils.cloud_storage._GetLocked')
+ def testHashPathAndFileHashExistWithSameHash(
+ self, unused_get_locked, unused_lock_mock):
+ cloud_storage.ReadHash = _FakeReadHash
+ cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
+ file_path = 'test-file-path.wpr'
+
+ # hash_path and file_path exist, and have same hash.
+ self.CreateFiles([file_path])
+ self.assertFalse(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+
+ @mock.patch('py_utils.cloud_storage._FileLock')
+ @mock.patch('py_utils.cloud_storage._GetLocked')
+ def testHashPathAndFileHashExistWithDifferentHash(
+ self, mock_get_locked, unused_get_locked):
+ cloud_storage.ReadHash = _FakeReadHash
+ cloud_storage.CalculateHash = _FakeCalulateHashNewHash
+ file_path = 'test-file-path.wpr'
+ hash_path = file_path + '.sha1'
+
+ def _FakeGetLocked(bucket, expected_hash, file_path):
+ del bucket, expected_hash, file_path # unused
+ cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
+
+ mock_get_locked.side_effect = _FakeGetLocked
+
+ self.CreateFiles([file_path, hash_path])
+ # hash_path and file_path exist, and have different hashes.
+ self.assertTrue(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+
+ @mock.patch('py_utils.cloud_storage._FileLock')
+ @mock.patch('py_utils.cloud_storage.CalculateHash')
+ @mock.patch('py_utils.cloud_storage._GetLocked')
+ def testNoHashComputationNeededUponSecondCall(
+ self, mock_get_locked, mock_calculate_hash, unused_get_locked):
+ mock_calculate_hash.side_effect = _FakeCalulateHashNewHash
+ cloud_storage.ReadHash = _FakeReadHash
+ file_path = 'test-file-path.wpr'
+ hash_path = file_path + '.sha1'
+
+ def _FakeGetLocked(bucket, expected_hash, file_path):
+ del bucket, expected_hash, file_path # unused
+ cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
+
+ mock_get_locked.side_effect = _FakeGetLocked
+
+ self.CreateFiles([file_path, hash_path])
+ # hash_path and file_path exist, and have different hashes. This first call
+ # will invoke a fetch.
+ self.assertTrue(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+
+ # The fetch left a .fetchts file on machine.
+ self.assertTrue(os.path.exists(file_path + '.fetchts'))
+
+ # Subsequent invocations of GetIfChanged should not invoke CalculateHash.
+ mock_calculate_hash.assert_not_called()
+ self.assertFalse(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+ self.assertFalse(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+
+ @mock.patch('py_utils.cloud_storage._FileLock')
+ @mock.patch('py_utils.cloud_storage.CalculateHash')
+ @mock.patch('py_utils.cloud_storage._GetLocked')
+ def testRefetchingFileUponHashFileChange(
+ self, mock_get_locked, mock_calculate_hash, unused_get_locked):
+ mock_calculate_hash.side_effect = _FakeCalulateHashNewHash
+ cloud_storage.ReadHash = _FakeReadHash
+ file_path = 'test-file-path.wpr'
+ hash_path = file_path + '.sha1'
+
+ def _FakeGetLocked(bucket, expected_hash, file_path):
+ del bucket, expected_hash, file_path # unused
+ cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
+
+ mock_get_locked.side_effect = _FakeGetLocked
+
+ self.CreateFiles([file_path, hash_path])
+ # hash_path and file_path exist, and have different hashes. This first call
+ # will invoke a fetch.
+ self.assertTrue(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+
+ # The fetch left a .fetchts file on machine.
+ self.assertTrue(os.path.exists(file_path + '.fetchts'))
+
+ with open(file_path + '.fetchts') as f:
+ fetchts = float(f.read())
+
+ # Updating the .sha1 hash_path file with the new hash after .fetchts
+ # is created.
+ file_obj = self.fs.GetObject(hash_path)
+ file_obj.SetMTime(fetchts + 100)
+
+ cloud_storage.ReadHash = lambda _: 'hashNeW'
+ def _FakeGetLockedNewHash(bucket, expected_hash, file_path):
+ del bucket, expected_hash, file_path # unused
+ cloud_storage.CalculateHash = lambda _: 'hashNeW'
+
+ mock_get_locked.side_effect = _FakeGetLockedNewHash
+
+ # hash_path and file_path exist, and have different hashes. This first call
+ # will invoke a fetch.
+ self.assertTrue(cloud_storage.GetIfChanged(file_path,
+ cloud_storage.PUBLIC_BUCKET))
+
+
class CloudStorageRealFsUnitTest(unittest.TestCase):
def setUp(self):
diff --git a/catapult/common/py_utils/py_utils/discover.py b/catapult/common/py_utils/py_utils/discover.py
new file mode 100644
index 00000000..09d5c5e2
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/discover.py
@@ -0,0 +1,191 @@
+# Copyright 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import fnmatch
+import importlib
+import inspect
+import os
+import re
+import sys
+
+from py_utils import camel_case
+
+
+def DiscoverModules(start_dir, top_level_dir, pattern='*'):
+ """Discover all modules in |start_dir| which match |pattern|.
+
+ Args:
+ start_dir: The directory to recursively search.
+ top_level_dir: The top level of the package, for importing.
+ pattern: Unix shell-style pattern for filtering the filenames to import.
+
+ Returns:
+ list of modules.
+ """
+ # start_dir and top_level_dir must be consistent with each other.
+ start_dir = os.path.realpath(start_dir)
+ top_level_dir = os.path.realpath(top_level_dir)
+
+ modules = []
+ sub_paths = list(os.walk(start_dir))
+ # We sort the directories & file paths to ensure a deterministic ordering when
+ # traversing |top_level_dir|.
+ sub_paths.sort(key=lambda paths_tuple: paths_tuple[0])
+ for dir_path, _, filenames in sub_paths:
+ # Sort the filenames so files are visited in a deterministic order.
+ filenames.sort()
+ for filename in filenames:
+ # Filter out unwanted filenames.
+ if filename.startswith('.') or filename.startswith('_'):
+ continue
+ if os.path.splitext(filename)[1] != '.py':
+ continue
+ if not fnmatch.fnmatch(filename, pattern):
+ continue
+
+ # Find the module.
+ module_rel_path = os.path.relpath(
+ os.path.join(dir_path, filename), top_level_dir)
+ module_name = re.sub(r'[/\\]', '.', os.path.splitext(module_rel_path)[0])
+
+ # Import the module.
+ try:
+ # Make sure that top_level_dir is the first path in the sys.path in case
+ # there are naming conflict in module parts.
+ original_sys_path = sys.path[:]
+ sys.path.insert(0, top_level_dir)
+ module = importlib.import_module(module_name)
+ modules.append(module)
+ finally:
+ sys.path = original_sys_path
+ return modules
+
+
+def AssertNoKeyConflicts(classes_by_key_1, classes_by_key_2):
+ for k in classes_by_key_1:
+ if k in classes_by_key_2:
+ assert classes_by_key_1[k] is classes_by_key_2[k], (
+ 'Found conflicting classes for the same key: '
+ 'key=%s, class_1=%s, class_2=%s' % (
+ k, classes_by_key_1[k], classes_by_key_2[k]))
+
+
+# TODO(dtu): Normalize all discoverable classes to have corresponding module
+# and class names, then always index by class name.
+def DiscoverClasses(start_dir,
+ top_level_dir,
+ base_class,
+ pattern='*',
+ index_by_class_name=True,
+ directly_constructable=False):
+ """Discover all classes in |start_dir| which subclass |base_class|.
+
+ Base classes that contain subclasses are ignored by default.
+
+ Args:
+ start_dir: The directory to recursively search.
+ top_level_dir: The top level of the package, for importing.
+ base_class: The base class to search for.
+ pattern: Unix shell-style pattern for filtering the filenames to import.
+ index_by_class_name: If True, use class name converted to
+ lowercase_with_underscores instead of module name in return dict keys.
+ directly_constructable: If True, will only return classes that can be
+ constructed without arguments
+
+ Returns:
+ dict of {module_name: class} or {underscored_class_name: class}
+ """
+ modules = DiscoverModules(start_dir, top_level_dir, pattern)
+ classes = {}
+ for module in modules:
+ new_classes = DiscoverClassesInModule(
+ module, base_class, index_by_class_name, directly_constructable)
+ # TODO(nednguyen): we should remove index_by_class_name once
+ # benchmark_smoke_unittest in chromium/src/tools/perf no longer relied
+ # naming collisions to reduce the number of smoked benchmark tests.
+ # crbug.com/548652
+ if index_by_class_name:
+ AssertNoKeyConflicts(classes, new_classes)
+ classes = dict(classes.items() + new_classes.items())
+ return classes
+
+
+# TODO(nednguyen): we should remove index_by_class_name once
+# benchmark_smoke_unittest in chromium/src/tools/perf no longer relied
+# naming collisions to reduce the number of smoked benchmark tests.
+# crbug.com/548652
+def DiscoverClassesInModule(module,
+ base_class,
+ index_by_class_name=False,
+ directly_constructable=False):
+ """Discover all classes in |module| which subclass |base_class|.
+
+ Base classes that contain subclasses are ignored by default.
+
+ Args:
+ module: The module to search.
+ base_class: The base class to search for.
+ index_by_class_name: If True, use class name converted to
+ lowercase_with_underscores instead of module name in return dict keys.
+
+ Returns:
+ dict of {module_name: class} or {underscored_class_name: class}
+ """
+ classes = {}
+ for _, obj in inspect.getmembers(module):
+ # Ensure object is a class.
+ if not inspect.isclass(obj):
+ continue
+ # Include only subclasses of base_class.
+ if not issubclass(obj, base_class):
+ continue
+ # Exclude the base_class itself.
+ if obj is base_class:
+ continue
+ # Exclude protected or private classes.
+ if obj.__name__.startswith('_'):
+ continue
+ # Include only the module in which the class is defined.
+ # If a class is imported by another module, exclude those duplicates.
+ if obj.__module__ != module.__name__:
+ continue
+
+ if index_by_class_name:
+ key_name = camel_case.ToUnderscore(obj.__name__)
+ else:
+ key_name = module.__name__.split('.')[-1]
+ if not directly_constructable or IsDirectlyConstructable(obj):
+ if key_name in classes and index_by_class_name:
+ assert classes[key_name] is obj, (
+ 'Duplicate key_name with different objs detected: '
+ 'key=%s, obj1=%s, obj2=%s' % (key_name, classes[key_name], obj))
+ else:
+ classes[key_name] = obj
+
+ return classes
+
+
+def IsDirectlyConstructable(cls):
+ """Returns True if an instance of |cls| can be constructed without arguments."""
+ assert inspect.isclass(cls)
+ if not hasattr(cls, '__init__'):
+ # Case |class A: pass|.
+ return True
+ if cls.__init__ is object.__init__:
+ # Case |class A(object): pass|.
+ return True
+ # Case |class (object):| with |__init__| other than |object.__init__|.
+ args, _, _, defaults = inspect.getargspec(cls.__init__)
+ if defaults is None:
+ defaults = ()
+ # Return true if |self| is only arg without a default.
+ return len(args) == len(defaults) + 1
+
+
+_COUNTER = [0]
+
+
+def _GetUniqueModuleName():
+ _COUNTER[0] += 1
+ return "module_" + str(_COUNTER[0])
diff --git a/catapult/common/py_utils/py_utils/discover_unittest.py b/catapult/common/py_utils/py_utils/discover_unittest.py
new file mode 100644
index 00000000..137d85f7
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/discover_unittest.py
@@ -0,0 +1,146 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import os
+import unittest
+
+from py_utils import discover
+
+
+class DiscoverTest(unittest.TestCase):
+
+ def setUp(self):
+ self._base_dir = os.path.join(os.path.dirname(__file__), 'test_data')
+ self._start_dir = os.path.join(self._base_dir, 'discoverable_classes')
+ self._base_class = Exception
+
+ def testDiscoverClassesWithIndexByModuleName(self):
+ classes = discover.DiscoverClasses(self._start_dir,
+ self._base_dir,
+ self._base_class,
+ index_by_class_name=False)
+
+ actual_classes = dict((name, cls.__name__)
+ for name, cls in classes.iteritems())
+ expected_classes = {
+ 'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1',
+ 'discover_dummyclass': 'DummyException',
+ 'parameter_discover_dummyclass': 'DummyExceptionWithParameterImpl2'
+ }
+ self.assertEqual(actual_classes, expected_classes)
+
+ def testDiscoverDirectlyConstructableClassesWithIndexByClassName(self):
+ classes = discover.DiscoverClasses(self._start_dir,
+ self._base_dir,
+ self._base_class,
+ directly_constructable=True)
+
+ actual_classes = dict((name, cls.__name__)
+ for name, cls in classes.iteritems())
+ expected_classes = {
+ 'dummy_exception': 'DummyException',
+ 'dummy_exception_impl1': 'DummyExceptionImpl1',
+ 'dummy_exception_impl2': 'DummyExceptionImpl2',
+ }
+ self.assertEqual(actual_classes, expected_classes)
+
+ def testDiscoverClassesWithIndexByClassName(self):
+ classes = discover.DiscoverClasses(self._start_dir, self._base_dir,
+ self._base_class)
+
+ actual_classes = dict((name, cls.__name__)
+ for name, cls in classes.iteritems())
+ expected_classes = {
+ 'dummy_exception': 'DummyException',
+ 'dummy_exception_impl1': 'DummyExceptionImpl1',
+ 'dummy_exception_impl2': 'DummyExceptionImpl2',
+ 'dummy_exception_with_parameter_impl1':
+ 'DummyExceptionWithParameterImpl1',
+ 'dummy_exception_with_parameter_impl2':
+ 'DummyExceptionWithParameterImpl2'
+ }
+ self.assertEqual(actual_classes, expected_classes)
+
+ def testDiscoverClassesWithPatternAndIndexByModule(self):
+ classes = discover.DiscoverClasses(self._start_dir,
+ self._base_dir,
+ self._base_class,
+ pattern='another*',
+ index_by_class_name=False)
+
+ actual_classes = dict((name, cls.__name__)
+ for name, cls in classes.iteritems())
+ expected_classes = {
+ 'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1'
+ }
+ self.assertEqual(actual_classes, expected_classes)
+
+ def testDiscoverDirectlyConstructableClassesWithPatternAndIndexByClassName(
+ self):
+ classes = discover.DiscoverClasses(self._start_dir,
+ self._base_dir,
+ self._base_class,
+ pattern='another*',
+ directly_constructable=True)
+
+ actual_classes = dict((name, cls.__name__)
+ for name, cls in classes.iteritems())
+ expected_classes = {
+ 'dummy_exception_impl1': 'DummyExceptionImpl1',
+ 'dummy_exception_impl2': 'DummyExceptionImpl2',
+ }
+ self.assertEqual(actual_classes, expected_classes)
+
+ def testDiscoverClassesWithPatternAndIndexByClassName(self):
+ classes = discover.DiscoverClasses(self._start_dir,
+ self._base_dir,
+ self._base_class,
+ pattern='another*')
+
+ actual_classes = dict((name, cls.__name__)
+ for name, cls in classes.iteritems())
+ expected_classes = {
+ 'dummy_exception_impl1': 'DummyExceptionImpl1',
+ 'dummy_exception_impl2': 'DummyExceptionImpl2',
+ 'dummy_exception_with_parameter_impl1':
+ 'DummyExceptionWithParameterImpl1',
+ }
+ self.assertEqual(actual_classes, expected_classes)
+
+
+class ClassWithoutInitDefOne: # pylint: disable=old-style-class, no-init
+ pass
+
+
+class ClassWithoutInitDefTwo(object):
+ pass
+
+
+class ClassWhoseInitOnlyHasSelf(object):
+ def __init__(self):
+ pass
+
+
+class ClassWhoseInitWithDefaultArguments(object):
+ def __init__(self, dog=1, cat=None, cow=None, fud='a'):
+ pass
+
+
+class ClassWhoseInitWithDefaultArgumentsAndNonDefaultArguments(object):
+ def __init__(self, x, dog=1, cat=None, fish=None, fud='a'):
+ pass
+
+
+class IsDirectlyConstructableTest(unittest.TestCase):
+
+ def testIsDirectlyConstructableReturnsTrue(self):
+ self.assertTrue(discover.IsDirectlyConstructable(ClassWithoutInitDefOne))
+ self.assertTrue(discover.IsDirectlyConstructable(ClassWithoutInitDefTwo))
+ self.assertTrue(discover.IsDirectlyConstructable(ClassWhoseInitOnlyHasSelf))
+ self.assertTrue(
+ discover.IsDirectlyConstructable(ClassWhoseInitWithDefaultArguments))
+
+ def testIsDirectlyConstructableReturnsFalse(self):
+ self.assertFalse(
+ discover.IsDirectlyConstructable(
+ ClassWhoseInitWithDefaultArgumentsAndNonDefaultArguments))
diff --git a/catapult/common/py_utils/py_utils/expectations_parser.py b/catapult/common/py_utils/py_utils/expectations_parser.py
new file mode 100644
index 00000000..6fa94070
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/expectations_parser.py
@@ -0,0 +1,124 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+
+class ParseError(Exception):
+ pass
+
+
+class Expectation(object):
+ def __init__(self, reason, test, conditions, results):
+ """Constructor for expectations.
+
+ Args:
+ reason: String that indicates the reason for disabling.
+ test: String indicating which test is being disabled.
+ conditions: List of tags indicating which conditions to disable for.
+ Conditions are combined using logical and. Example: ['Mac', 'Debug']
+ results: List of outcomes for test. Example: ['Skip', 'Pass']
+ """
+ assert isinstance(reason, basestring) or reason is None
+ self._reason = reason
+ assert isinstance(test, basestring)
+ self._test = test
+ assert isinstance(conditions, list)
+ self._conditions = conditions
+ assert isinstance(results, list)
+ self._results = results
+
+ def __eq__(self, other):
+ return (self.reason == other.reason and
+ self.test == other.test and
+ self.conditions == other.conditions and
+ self.results == other.results)
+
+ @property
+ def reason(self):
+ return self._reason
+
+ @property
+ def test(self):
+ return self._test
+
+ @property
+ def conditions(self):
+ return self._conditions
+
+ @property
+ def results(self):
+ return self._results
+
+
+class TestExpectationParser(object):
+ """Parse expectations data in TA/DA format.
+
+ This parser covers the 'tagged' test lists format in:
+ bit.ly/chromium-test-list-format
+
+ Takes raw expectations data as a string read from the TA/DA expectation file
+ in the format:
+
+ # This is an example expectation file.
+ #
+ # tags: Mac Mac10.10 Mac10.11
+ # tags: Win Win8
+
+ crbug.com/123 [ Win ] benchmark/story [ Skip ]
+ ...
+ """
+
+ TAG_TOKEN = '# tags:'
+ _MATCH_STRING = r'^(?:(crbug\.com/\d+) )?' # The bug field (optional).
+ _MATCH_STRING += r'(?:\[ (.+) \] )?' # The label field (optional).
+ _MATCH_STRING += r'(\S+) ' # The test path field.
+ _MATCH_STRING += r'\[ ([^\[.]+) \]' # The expectation field.
+ _MATCH_STRING += r'(\s+#.*)?$' # End comment (optional).
+ MATCHER = re.compile(_MATCH_STRING)
+
+ def __init__(self, raw_data):
+ self._tags = []
+ self._expectations = []
+ self._ParseRawExpectationData(raw_data)
+
+ def _ParseRawExpectationData(self, raw_data):
+ for count, line in list(enumerate(raw_data.splitlines(), start=1)):
+ # Handle metadata and comments.
+ if line.startswith(self.TAG_TOKEN):
+ for word in line[len(self.TAG_TOKEN):].split():
+ # Expectations must be after all tags are declared.
+ if self._expectations:
+ raise ParseError('Tag found after first expectation.')
+ self._tags.append(word)
+ elif line.startswith('#') or not line:
+ continue # Ignore, it is just a comment or empty.
+ else:
+ self._expectations.append(
+ self._ParseExpectationLine(count, line, self._tags))
+
+ def _ParseExpectationLine(self, line_number, line, tags):
+ match = self.MATCHER.match(line)
+ if not match:
+ raise ParseError(
+ 'Expectation has invalid syntax on line %d: %s'
+ % (line_number, line))
+ # Unused group is optional trailing comment.
+ reason, raw_conditions, test, results, _ = match.groups()
+ conditions = [c for c in raw_conditions.split()] if raw_conditions else []
+
+ for c in conditions:
+ if c not in tags:
+ raise ParseError(
+ 'Condition %s not found in expectations tag data. Line %d'
+ % (c, line_number))
+ return Expectation(reason, test, conditions, [r for r in results.split()])
+
+ @property
+ def expectations(self):
+ return self._expectations
+
+ @property
+ def tags(self):
+ return self._tags
diff --git a/catapult/common/py_utils/py_utils/expectations_parser_unittest.py b/catapult/common/py_utils/py_utils/expectations_parser_unittest.py
new file mode 100644
index 00000000..a842c4c9
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/expectations_parser_unittest.py
@@ -0,0 +1,165 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import unittest
+
+from py_utils import expectations_parser
+
+
+class TestExpectationParserTest(unittest.TestCase):
+
+ def testInitWithGoodData(self):
+ good_data = """
+# This is a test expectation file.
+#
+# tags: tag1 tag2 tag3
+# tags: tag4 Mac Win Debug
+
+crbug.com/12345 [ Mac ] b1/s1 [ Skip ]
+crbug.com/23456 [ Mac Debug ] b1/s2 [ Skip ]
+"""
+ parser = expectations_parser.TestExpectationParser(good_data)
+ tags = ['tag1', 'tag2', 'tag3', 'tag4', 'Mac', 'Win', 'Debug']
+ self.assertEqual(parser.tags, tags)
+ expected_outcome = [
+ expectations_parser.Expectation(
+ 'crbug.com/12345', 'b1/s1', ['Mac'], ['Skip']),
+ expectations_parser.Expectation(
+ 'crbug.com/23456', 'b1/s2', ['Mac', 'Debug'], ['Skip'])
+ ]
+ for i in range(len(parser.expectations)):
+ self.assertEqual(parser.expectations[i], expected_outcome[i])
+
+ def testInitWithBadData(self):
+ bad_data = """
+# This is a test expectation file.
+#
+# tags: tag1 tag2 tag3
+# tags: tag4
+
+crbug.com/12345 [ Mac b1/s1 [ Skip ]
+"""
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(bad_data)
+
+ def testTagAfterExpectationsStart(self):
+ bad_data = """
+# This is a test expectation file.
+#
+# tags: tag1 tag2 tag3
+
+crbug.com/12345 [ tag1 ] b1/s1 [ Skip ]
+
+# tags: tag4
+"""
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(bad_data)
+
+ def testParseExpectationLineEverythingThere(self):
+ raw_data = '# tags: Mac\ncrbug.com/23456 [ Mac ] b1/s2 [ Skip ]'
+ parser = expectations_parser.TestExpectationParser(raw_data)
+ expected_outcome = [
+ expectations_parser.Expectation(
+ 'crbug.com/23456', 'b1/s2', ['Mac'], ['Skip'])
+ ]
+ for i in range(len(parser.expectations)):
+ self.assertEqual(parser.expectations[i], expected_outcome[i])
+
+ def testParseExpectationLineBadTag(self):
+ raw_data = '# tags: None\ncrbug.com/23456 [ Mac ] b1/s2 [ Skip ]'
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(raw_data)
+
+ def testParseExpectationLineNoConditions(self):
+ raw_data = '# tags: All\ncrbug.com/12345 b1/s1 [ Skip ]'
+ parser = expectations_parser.TestExpectationParser(raw_data)
+ expected_outcome = [
+ expectations_parser.Expectation(
+ 'crbug.com/12345', 'b1/s1', [], ['Skip']),
+ ]
+ for i in range(len(parser.expectations)):
+ self.assertEqual(parser.expectations[i], expected_outcome[i])
+
+ def testParseExpectationLineNoBug(self):
+ raw_data = '# tags: All\n[ All ] b1/s1 [ Skip ]'
+ parser = expectations_parser.TestExpectationParser(raw_data)
+ expected_outcome = [
+ expectations_parser.Expectation(
+ None, 'b1/s1', ['All'], ['Skip']),
+ ]
+ for i in range(len(parser.expectations)):
+ self.assertEqual(parser.expectations[i], expected_outcome[i])
+
+ def testParseExpectationLineNoBugNoConditions(self):
+ raw_data = '# tags: All\nb1/s1 [ Skip ]'
+ parser = expectations_parser.TestExpectationParser(raw_data)
+ expected_outcome = [
+ expectations_parser.Expectation(
+ None, 'b1/s1', [], ['Skip']),
+ ]
+ for i in range(len(parser.expectations)):
+ self.assertEqual(parser.expectations[i], expected_outcome[i])
+
+ def testParseExpectationLineMultipleConditions(self):
+ raw_data = ('# tags:All None Batman\n'
+ 'crbug.com/123 [ All None Batman ] b1/s1 [ Skip ]')
+ parser = expectations_parser.TestExpectationParser(raw_data)
+ expected_outcome = [
+ expectations_parser.Expectation(
+ 'crbug.com/123', 'b1/s1', ['All', 'None', 'Batman'], ['Skip']),
+ ]
+ for i in range(len(parser.expectations)):
+ self.assertEqual(parser.expectations[i], expected_outcome[i])
+
+ def testParseExpectationLineBadConditionBracket(self):
+ raw_data = '# tags: Mac\ncrbug.com/23456 ] Mac ] b1/s2 [ Skip ]'
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(raw_data)
+
+ def testParseExpectationLineBadResultBracket(self):
+ raw_data = '# tags: Mac\ncrbug.com/23456 ] Mac ] b1/s2 ] Skip ]'
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(raw_data)
+
+ def testParseExpectationLineBadConditionBracketSpacing(self):
+ raw_data = '# tags: Mac\ncrbug.com/2345 [Mac] b1/s1 [ Skip ]'
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(raw_data)
+
+ def testParseExpectationLineBadResultBracketSpacing(self):
+ raw_data = '# tags: Mac\ncrbug.com/2345 [ Mac ] b1/s1 [Skip]'
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(raw_data)
+
+ def testParseExpectationLineNoClosingConditionBracket(self):
+ raw_data = '# tags: Mac\ncrbug.com/2345 [ Mac b1/s1 [ Skip ]'
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(raw_data)
+
+ def testParseExpectationLineNoClosingResultBracket(self):
+ raw_data = '# tags: Mac\ncrbug.com/2345 [ Mac ] b1/s1 [ Skip'
+ with self.assertRaises(expectations_parser.ParseError):
+ expectations_parser.TestExpectationParser(raw_data)
+
+ def testParseExpectationLineUrlInTestName(self):
+ raw_data = (
+ '# tags: Mac\ncrbug.com/123 [ Mac ] b.1/http://google.com [ Skip ]')
+ expected_outcomes = [
+ expectations_parser.Expectation(
+ 'crbug.com/123', 'b.1/http://google.com', ['Mac'], ['Skip'])
+ ]
+ parser = expectations_parser.TestExpectationParser(raw_data)
+ for i in range(len(parser.expectations)):
+ self.assertEqual(parser.expectations[i], expected_outcomes[i])
+
+ def testParseExpectationLineEndingComment(self):
+ raw_data = '# tags: Mac\ncrbug.com/23456 [ Mac ] b1/s2 [ Skip ] # abc 123'
+ parser = expectations_parser.TestExpectationParser(raw_data)
+ expected_outcome = [
+ expectations_parser.Expectation(
+ 'crbug.com/23456', 'b1/s2', ['Mac'], ['Skip'])
+ ]
+ for i in range(len(parser.expectations)):
+ self.assertEqual(parser.expectations[i], expected_outcome[i])
diff --git a/catapult/common/py_utils/py_utils/logging_util.py b/catapult/common/py_utils/py_utils/logging_util.py
new file mode 100644
index 00000000..43578511
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/logging_util.py
@@ -0,0 +1,35 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Logging util functions.
+
+It would be named logging, but other modules in this directory use the default
+logging module, so that would break them.
+"""
+
+import contextlib
+import logging
+
+@contextlib.contextmanager
+def CaptureLogs(file_stream):
+ if not file_stream:
+ # No file stream given, just don't capture logs.
+ yield
+ return
+
+ fh = logging.StreamHandler(file_stream)
+
+ logger = logging.getLogger()
+ # Try to copy the current log format, if one is set.
+ if logger.handlers and hasattr(logger.handlers[0], 'formatter'):
+ fh.formatter = logger.handlers[0].formatter
+ else:
+ fh.setFormatter(logging.Formatter(
+ '(%(levelname)s) %(asctime)s %(message)s'))
+ logger.addHandler(fh)
+
+ try:
+ yield
+ finally:
+ logger = logging.getLogger()
+ logger.removeHandler(fh)
diff --git a/catapult/common/py_utils/py_utils/logging_util_unittest.py b/catapult/common/py_utils/py_utils/logging_util_unittest.py
new file mode 100644
index 00000000..a9577053
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/logging_util_unittest.py
@@ -0,0 +1,23 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import logging
+import StringIO
+import unittest
+
+from py_utils import logging_util
+
+
+class LoggingUtilTest(unittest.TestCase):
+ def testCapture(self):
+ s = StringIO.StringIO()
+ with logging_util.CaptureLogs(s):
+ logging.fatal('test')
+
+ # Only assert ends with, since the logging message by default has the date
+ # in it.
+ self.assertTrue(s.getvalue().endswith('test\n'))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/catapult/common/py_utils/py_utils/memory_debug.py b/catapult/common/py_utils/py_utils/memory_debug.py
new file mode 100755
index 00000000..864725d0
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/memory_debug.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import heapq
+import logging
+import os
+import psutil
+import sys
+
+
+BYTE_UNITS = ['B', 'KiB', 'MiB', 'GiB']
+
+
+def FormatBytes(value):
+ def GetValueAndUnit(value):
+ for unit in BYTE_UNITS[:-1]:
+ if abs(value) < 1024.0:
+ return value, unit
+ value /= 1024.0
+ return value, BYTE_UNITS[-1]
+
+ if value is not None:
+ return '%.1f %s' % GetValueAndUnit(value)
+ else:
+ return 'N/A'
+
+
+def _GetProcessInfo(p):
+ pinfo = p.as_dict(attrs=['pid', 'name', 'memory_info'])
+ pinfo['mem_rss'] = getattr(pinfo['memory_info'], 'rss', 0)
+ return pinfo
+
+
+def _LogProcessInfo(pinfo, level):
+ pinfo['mem_rss_fmt'] = FormatBytes(pinfo['mem_rss'])
+ logging.log(level, '%(mem_rss_fmt)s (pid=%(pid)s)', pinfo)
+
+
+def LogHostMemoryUsage(top_n=10, level=logging.INFO):
+ if psutil.version_info < (2, 0):
+ logging.warning('psutil %s too old, upgrade to version 2.0 or higher'
+ ' for memory usage information.', psutil.__version__)
+ return
+
+ # TODO(crbug.com/777865): Remove the following pylint disable. Even if we
+ # check for a recent enough psutil version above, the catapult presubmit
+ # builder (still running some old psutil) fails pylint checks due to API
+ # changes in psutil.
+ # pylint: disable=no-member
+ mem = psutil.virtual_memory()
+ logging.log(level, 'Used %s out of %s memory available.',
+ FormatBytes(mem.used), FormatBytes(mem.total))
+ logging.log(level, 'Memory usage of top %i processes groups', top_n)
+ pinfos_by_names = {}
+ for p in psutil.process_iter():
+ pinfo = _GetProcessInfo(p)
+ pname = pinfo['name']
+ if pname not in pinfos_by_names:
+ pinfos_by_names[pname] = {'name': pname, 'total_mem_rss': 0, 'pids': []}
+ pinfos_by_names[pname]['total_mem_rss'] += pinfo['mem_rss']
+ pinfos_by_names[pname]['pids'].append(str(pinfo['pid']))
+
+ sorted_pinfo_groups = heapq.nlargest(
+ top_n, pinfos_by_names.values(), key=lambda item: item['total_mem_rss'])
+ for group in sorted_pinfo_groups:
+ group['total_mem_rss_fmt'] = FormatBytes(group['total_mem_rss'])
+ group['pids_fmt'] = ', '.join(group['pids'])
+ logging.log(
+ level, '- %(name)s - %(total_mem_rss_fmt)s - pids: %(pids_fmt)s', group)
+ logging.log(level, 'Current process:')
+ pinfo = _GetProcessInfo(psutil.Process(os.getpid()))
+ _LogProcessInfo(pinfo, level)
+
+
+def main():
+ logging.basicConfig(level=logging.INFO)
+ LogHostMemoryUsage()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/catapult/common/py_utils/py_utils/py_utils_unittest.py b/catapult/common/py_utils/py_utils/py_utils_unittest.py
index e614a454..588a5d57 100644
--- a/catapult/common/py_utils/py_utils/py_utils_unittest.py
+++ b/catapult/common/py_utils/py_utils/py_utils_unittest.py
@@ -53,3 +53,4 @@ class WaitForTest(unittest.TestCase):
def testWaitForFalseLambda(self):
with self.assertRaises(py_utils.TimeoutException):
py_utils.WaitFor(lambda: False, .1)
+
diff --git a/catapult/common/py_utils/py_utils/retry_util.py b/catapult/common/py_utils/py_utils/retry_util.py
new file mode 100644
index 00000000..e5826cab
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/retry_util.py
@@ -0,0 +1,57 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import functools
+import logging
+import time
+
+
+def RetryOnException(exc_type, retries):
+ """Decorator to retry running a function if an exception is raised.
+
+ Implements exponential backoff to wait between each retry attempt, starting
+ with 1 second.
+
+ Note: the default number of retries is defined on the decorator, the decorated
+ function *must* also receive a "retries" argument (although its assigned
+ default value is ignored), and clients of the function may override the actual
+ number of retries at the call site.
+
+ The "unused" retries argument on the decorated function must be given to
+ keep pylint happy and to avoid breaking the Principle of Least Astonishment
+ if the decorator were to change the signature of the function.
+
+ For example:
+
+ @retry_util.RetryOnException(OSError, retries=3) # default no. of retries
+ def ProcessSomething(thing, retries=None): # this default value is ignored
+ del retries # Unused. Handled by the decorator.
+ # Do your thing processing here, maybe sometimes raising exceptions.
+
+ ProcessSomething(a_thing) # retries 3 times.
+ ProcessSomething(b_thing, retries=5) # retries 5 times.
+
+ Args:
+ exc_type: An exception type (or a tuple of them), on which to retry.
+ retries: Default number of extra attempts to try, the caller may also
+ override this number. If an exception is raised during the last try,
+ then the exception is not caught and passed back to the caller.
+ """
+ def Decorator(f):
+ @functools.wraps(f)
+ def Wrapper(*args, **kwargs):
+ wait = 1
+ kwargs.setdefault('retries', retries)
+ for _ in xrange(kwargs['retries']):
+ try:
+ return f(*args, **kwargs)
+ except exc_type as exc:
+ logging.warning(
+ '%s raised %s, will retry in %d second%s ...',
+ f.__name__, type(exc).__name__, wait, '' if wait == 1 else 's')
+ time.sleep(wait)
+ wait *= 2
+ # Last try with no exception catching.
+ return f(*args, **kwargs)
+ return Wrapper
+ return Decorator
diff --git a/catapult/common/py_utils/py_utils/retry_util_unittest.py b/catapult/common/py_utils/py_utils/retry_util_unittest.py
new file mode 100644
index 00000000..151f88ed
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/retry_util_unittest.py
@@ -0,0 +1,118 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import mock
+import unittest
+
+from py_utils import retry_util
+
+
+class RetryOnExceptionTest(unittest.TestCase):
+ def setUp(self):
+ self.num_calls = 0
+ # Patch time.sleep to make tests run faster (skip waits) and also check
+ # that exponential backoff is implemented correctly.
+ patcher = mock.patch('time.sleep')
+ self.time_sleep = patcher.start()
+ self.addCleanup(patcher.stop)
+
+ def testNoExceptionsReturnImmediately(self):
+ @retry_util.RetryOnException(Exception, retries=3)
+ def Test(retries=None):
+ del retries
+ self.num_calls += 1
+ return 'OK!'
+
+ # The function is called once and returns the expected value.
+ self.assertEqual(Test(), 'OK!')
+ self.assertEqual(self.num_calls, 1)
+
+ def testRaisesExceptionIfAlwaysFailing(self):
+ @retry_util.RetryOnException(KeyError, retries=5)
+ def Test(retries=None):
+ del retries
+ self.num_calls += 1
+ raise KeyError('oops!')
+
+ # The exception is eventually raised.
+ with self.assertRaises(KeyError):
+ Test()
+ # The function is called the expected number of times.
+ self.assertEqual(self.num_calls, 6)
+ # Waits between retries do follow exponential backoff.
+ self.assertEqual(
+ self.time_sleep.call_args_list,
+ [mock.call(i) for i in (1, 2, 4, 8, 16)])
+
+ def testOtherExceptionsAreNotCaught(self):
+ @retry_util.RetryOnException(KeyError, retries=3)
+ def Test(retries=None):
+ del retries
+ self.num_calls += 1
+ raise ValueError('oops!')
+
+ # The exception is raised immediately on the first try.
+ with self.assertRaises(ValueError):
+ Test()
+ self.assertEqual(self.num_calls, 1)
+
+ def testCallerMayOverrideRetries(self):
+ @retry_util.RetryOnException(KeyError, retries=3)
+ def Test(retries=None):
+ del retries
+ self.num_calls += 1
+ raise KeyError('oops!')
+
+ with self.assertRaises(KeyError):
+ Test(retries=10)
+ # The value on the caller overrides the default on the decorator.
+ self.assertEqual(self.num_calls, 11)
+
+ def testCanEventuallySucceed(self):
+ @retry_util.RetryOnException(KeyError, retries=5)
+ def Test(retries=None):
+ del retries
+ self.num_calls += 1
+ if self.num_calls < 3:
+ raise KeyError('oops!')
+ else:
+ return 'OK!'
+
+ # The value is returned after the expected number of calls.
+ self.assertEqual(Test(), 'OK!')
+ self.assertEqual(self.num_calls, 3)
+
+ def testRetriesCanBeSwitchedOff(self):
+ @retry_util.RetryOnException(KeyError, retries=5)
+ def Test(retries=None):
+ del retries
+ self.num_calls += 1
+ if self.num_calls < 3:
+ raise KeyError('oops!')
+ else:
+ return 'OK!'
+
+ # We fail immediately on the first try.
+ with self.assertRaises(KeyError):
+ Test(retries=0)
+ self.assertEqual(self.num_calls, 1)
+
+ def testCanRetryOnMultipleExceptions(self):
+ @retry_util.RetryOnException((KeyError, ValueError), retries=3)
+ def Test(retries=None):
+ del retries
+ self.num_calls += 1
+ if self.num_calls == 1:
+ raise KeyError('oops!')
+ elif self.num_calls == 2:
+ raise ValueError('uh oh!')
+ else:
+ return 'OK!'
+
+ # Call eventually succeeds after enough tries.
+ self.assertEqual(Test(retries=5), 'OK!')
+ self.assertEqual(self.num_calls, 3)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/catapult/common/py_utils/py_utils/slots_metaclass.py b/catapult/common/py_utils/py_utils/slots_metaclass.py
new file mode 100644
index 00000000..ae36c677
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/slots_metaclass.py
@@ -0,0 +1,27 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+class SlotsMetaclass(type):
+ """This metaclass requires all subclasses to define __slots__.
+
+ Usage:
+ class Foo(object):
+ __metaclass__ = slots_metaclass.SlotsMetaclass
+ __slots__ = '_property0', '_property1',
+
+ __slots__ must be a tuple containing string names of all properties that the
+ class contains.
+ Defining __slots__ reduces memory usage, accelerates property access, and
+ prevents dynamically adding unlisted properties.
+ If you need to dynamically add unlisted properties to a class with this
+ metaclass, then take a step back and rethink your goals. If you really really
+ need to dynamically add unlisted properties to a class with this metaclass,
+ add '__dict__' to its __slots__.
+ """
+
+ def __new__(mcs, name, bases, attrs):
+ assert '__slots__' in attrs, 'Class "%s" must define __slots__' % name
+ assert isinstance(attrs['__slots__'], tuple), '__slots__ must be a tuple'
+
+ return super(SlotsMetaclass, mcs).__new__(mcs, name, bases, attrs)
diff --git a/catapult/common/py_utils/py_utils/slots_metaclass_unittest.py b/catapult/common/py_utils/py_utils/slots_metaclass_unittest.py
new file mode 100644
index 00000000..79bb343d
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/slots_metaclass_unittest.py
@@ -0,0 +1,41 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from py_utils import slots_metaclass
+
+class SlotsMetaclassUnittest(unittest.TestCase):
+  # Exercises SlotsMetaclass at class-definition time and at instantiation
+  # time: the metaclass check fires when a class body is executed, while
+  # Python's own __slots__ enforcement fires on attribute assignment.
+
+  def testSlotsMetaclass(self):
+    # A conforming class: declares __slots__ and only assigns listed names.
+    class NiceClass(object):
+      __metaclass__ = slots_metaclass.SlotsMetaclass
+      __slots__ = '_nice',
+
+      def __init__(self, nice):
+        self._nice = nice
+
+    NiceClass(42)
+
+    # A subclass that omits __slots__ trips the metaclass assert as soon as
+    # the class statement executes.
+    with self.assertRaises(AssertionError):
+      class NaughtyClass(NiceClass):
+        def __init__(self, naughty):
+          super(NaughtyClass, self).__init__(42)
+          self._naughty = naughty
+
+      # Metaclasses are called when the class is defined, so no need to
+      # instantiate it.
+
+    with self.assertRaises(AttributeError):
+      class NaughtyClass2(NiceClass):
+        __slots__ = ()
+
+        def __init__(self, naughty):
+          super(NaughtyClass2, self).__init__(42)
+          self._naughty = naughty  # pylint: disable=assigning-non-slot
+
+      # SlotsMetaclass is happy that __slots__ is defined, but python won't be
+      # happy about assigning _naughty when the class is instantiated because it
+      # isn't listed in __slots__, even if you disable the pylint error.
+      NaughtyClass2(666)
diff --git a/catapult/common/py_utils/py_utils/tempfile_ext.py b/catapult/common/py_utils/py_utils/tempfile_ext.py
index b0955307..394ad5b7 100644
--- a/catapult/common/py_utils/py_utils/tempfile_ext.py
+++ b/catapult/common/py_utils/py_utils/tempfile_ext.py
@@ -14,6 +14,11 @@ def NamedTemporaryDirectory(suffix='', prefix='tmp', dir=None):
This is a context manager version of tempfile.mkdtemp. The arguments to this
function are the same as the arguments for that one.
+
+  This can be used to automatically manage the lifetime of a temporary
+  directory, and of files created within it, without maintaining an open
+  file handle on them. Doing so can be useful in scenarios where a parent
+  process calls a child process to create a temporary file and then does
+  something with the resulting file.
"""
# This uses |dir| as a parameter name for consistency with mkdtemp.
# pylint: disable=redefined-builtin
diff --git a/catapult/common/py_utils/py_utils/test_data/discoverable_classes/__init__.py b/catapult/common/py_utils/py_utils/test_data/discoverable_classes/__init__.py
new file mode 100644
index 00000000..9228df89
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/test_data/discoverable_classes/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/catapult/common/py_utils/py_utils/test_data/discoverable_classes/another_discover_dummyclass.py b/catapult/common/py_utils/py_utils/test_data/discoverable_classes/another_discover_dummyclass.py
new file mode 100644
index 00000000..0459ccf7
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/test_data/discoverable_classes/another_discover_dummyclass.py
@@ -0,0 +1,33 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""More dummy exception subclasses used by core/discover.py's unit tests."""
+
+# Import class instead of module explicitly so that inspect.getmembers() returns
+# two Exception subclasses in this current file.
+# Suppress complaints about being unable to import the class. The directory
+# path is added at runtime by the telemetry test runner.
+#pylint: disable=import-error
+from discoverable_classes import discover_dummyclass
+
+
+class _PrivateDummyException(discover_dummyclass.DummyException):
+  # Private (leading-underscore) subclass; discovery tests use it to verify
+  # that private classes are handled as expected.
+  def __init__(self):
+    super(_PrivateDummyException, self).__init__()
+
+
+class DummyExceptionImpl1(_PrivateDummyException):
+  # Public no-argument subclass; discoverable by the discover unit tests.
+  def __init__(self):
+    super(DummyExceptionImpl1, self).__init__()
+
+
+class DummyExceptionImpl2(_PrivateDummyException):
+  # Second public no-argument subclass, so tests can check that multiple
+  # classes are discovered from one module.
+  def __init__(self):
+    super(DummyExceptionImpl2, self).__init__()
+
+
+class DummyExceptionWithParameterImpl1(_PrivateDummyException):
+  # Subclass whose constructor takes one required argument; used to test
+  # discovery filtering by constructor signature. The argument is unused.
+  def __init__(self, parameter):
+    super(DummyExceptionWithParameterImpl1, self).__init__()
+    del parameter
diff --git a/catapult/common/py_utils/py_utils/test_data/discoverable_classes/discover_dummyclass.py b/catapult/common/py_utils/py_utils/test_data/discoverable_classes/discover_dummyclass.py
new file mode 100644
index 00000000..15dcb35a
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/test_data/discoverable_classes/discover_dummyclass.py
@@ -0,0 +1,9 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A dummy exception subclass used by core/discover.py's unit tests."""
+
+class DummyException(Exception):
+  # Base class for the dummy exception hierarchy exercised by the discover
+  # unit tests; carries no state of its own.
+  def __init__(self):
+    super(DummyException, self).__init__()
diff --git a/catapult/common/py_utils/py_utils/test_data/discoverable_classes/parameter_discover_dummyclass.py b/catapult/common/py_utils/py_utils/test_data/discoverable_classes/parameter_discover_dummyclass.py
new file mode 100644
index 00000000..c37f4a99
--- /dev/null
+++ b/catapult/common/py_utils/py_utils/test_data/discoverable_classes/parameter_discover_dummyclass.py
@@ -0,0 +1,11 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A dummy exception subclass used by core/discover.py's unit tests."""
+from discoverable_classes import discover_dummyclass
+
+class DummyExceptionWithParameterImpl2(discover_dummyclass.DummyException):
+  # Subclass whose constructor takes two required arguments; used to test
+  # discovery filtering by constructor signature. Both arguments are unused.
+  def __init__(self, parameter1, parameter2):
+    super(DummyExceptionWithParameterImpl2, self).__init__()
+    del parameter1, parameter2