summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Frysinger <vapier@chromium.org>2014-02-09 23:32:23 -0500
committerchrome-internal-fetch <chrome-internal-fetch@google.com>2014-03-01 02:38:38 +0000
commit0c0efa21e5b7b6fb52a70eb5a252975ad64a035e (patch)
treed4063c44d056f493d0ef8f2428259dde0cdefef2
parent06460b7a93157251b3f923ec0cdb60ba511b5ad9 (diff)
downloadchromite-0c0efa21e5b7b6fb52a70eb5a252975ad64a035e.tar.gz
upload_symbols: use swarming server to cut down on duplicated uploads
We upload many symbols that are the same across runs and across bots. This
slows down both the server and our bots -- each symbol upload is a few hundred
milliseconds, and we have ~3000 files per build, which means we easily spend
30 minutes.

The chromium project is already running a swarming service which we can
(ab)use as a deduplication service. We use a hash of the symbol header as an
index and avoid uploading symbols whenever the server says it has seen it
already.

This CL only makes the functionality available. A follow up CL will take care
of turning it on.

See the upstream project https://code.google.com/p/swarming/ for more.

BUG=chromium:209442
TEST=`./buildbot/run_tests` passes
TEST=`cbuildbot daisy-release` passes (tests re-exec path)
TEST=moved swarming.client and made sure `cbuildbot -h` worked still
TEST=moved swarming.client and made sure `upload_symbols -h` aborted
CQ-DEPEND=CL:185621

Change-Id: Ib823cfccf7cdcb38253634b9a2744062727e7f0e
Reviewed-on: https://chromium-review.googlesource.com/185622
Reviewed-by: David James <davidjames@chromium.org>
Commit-Queue: Mike Frysinger <vapier@chromium.org>
Tested-by: Mike Frysinger <vapier@chromium.org>
-rw-r--r--__init__.py3
-rw-r--r--buildbot/constants.py2
-rw-r--r--scripts/upload_symbols.py281
-rwxr-xr-xscripts/upload_symbols_unittest.py119
-rw-r--r--third_party/.gitignore1
5 files changed, 349 insertions, 57 deletions
diff --git a/__init__.py b/__init__.py
index 43226417d..180103db6 100644
--- a/__init__.py
+++ b/__init__.py
@@ -22,7 +22,8 @@ if os.path.basename(_containing_dir) == 'third_party':
# List of third_party packages that might need subpaths added to search.
_paths = [
- 'pyelftools',
+ 'pyelftools',
+ 'swarming.client',
]
for _path in _paths:
diff --git a/buildbot/constants.py b/buildbot/constants.py
index b4ee15ae0..fec74c088 100644
--- a/buildbot/constants.py
+++ b/buildbot/constants.py
@@ -47,6 +47,8 @@ REEXEC_API_MAJOR = 0
REEXEC_API_MINOR = 2
REEXEC_API_VERSION = '%i.%i' % (REEXEC_API_MAJOR, REEXEC_API_MINOR)
+ISOLATESERVER = 'https://isolateserver.appspot.com'
+
GOOGLE_EMAIL = '@google.com'
CHROMIUM_EMAIL = '@chromium.org'
diff --git a/scripts/upload_symbols.py b/scripts/upload_symbols.py
index 8dc51b17f..421f9a421 100644
--- a/scripts/upload_symbols.py
+++ b/scripts/upload_symbols.py
@@ -14,6 +14,7 @@ from __future__ import print_function
import ctypes
import datetime
import functools
+import hashlib
import httplib
import multiprocessing
import os
@@ -25,12 +26,24 @@ import tempfile
import time
import urllib2
+from chromite.buildbot import constants
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import parallel
from chromite.lib import retry_util
+from chromite.lib import timeout_util
from chromite.scripts import cros_generate_breakpad_symbols
+# Needs to be after chromite imports.
+# TODO(build): When doing the initial buildbot bootstrap, we won't have any
+# other repos available. So ignore isolateserver imports. But buildbot will
+# re-exec itself once it has done a full repo sync and then the module will
+# be available -- it isn't needed that early. http://crbug.com/341152
+try:
+ import isolateserver
+except ImportError:
+ isolateserver = None
+
# URLs used for uploading symbols.
OFFICIAL_UPLOAD_URL = 'http://clients2.google.com/cr/symbol'
@@ -43,6 +56,26 @@ CRASH_SERVER_FILE_LIMIT = 350 * 1024 * 1024
DEFAULT_FILE_LIMIT = CRASH_SERVER_FILE_LIMIT - (10 * 1024 * 1024)
+# The batch limit when talking to the dedup server. We avoid sending one at a
+# time as the round trip overhead will dominate. Conversely, we avoid sending
+# all at once so we can start uploading symbols asap -- the symbol server is a
+# bit slow and will take longer than anything else.
+# TODO: A better algorithm would be adaptive. If we have more than one symbol
+# in the upload queue waiting, we could send more symbols to the dedupe server
+# at a time.
+DEDUPE_LIMIT = 100
+
+# How long to wait for the server to respond with the results. Note that the
+# larger the limit above, the larger this will need to be. So we give it ~1
+# second per item max.
+DEDUPE_TIMEOUT = DEDUPE_LIMIT
+
+# The unique namespace in the dedupe server that only we use. Helps avoid
+# collisions with all the hashed values and unrelated content.
+OFFICIAL_DEDUPE_NAMESPACE = 'chromium-os-upload-symbols'
+STAGING_DEDUPE_NAMESPACE = '%s-staging' % OFFICIAL_DEDUPE_NAMESPACE
+
+
# How long to wait (in seconds) for a single upload to complete. This has
# to allow for symbols that are up to CRASH_SERVER_FILE_LIMIT in size.
UPLOAD_TIMEOUT = 30 * 60
@@ -88,7 +121,7 @@ ERROR_ADJUST_FAIL = 1.0
ERROR_ADJUST_PASS = -0.5
-def SymUpload(sym_file, upload_url):
+def SymUpload(upload_url, sym_item):
"""Upload a symbol file to a HTTP server
The upload is a multipart/form-data POST with the following parameters:
@@ -105,10 +138,11 @@ def SymUpload(sym_file, upload_url):
symbol_file: the contents of the breakpad-format symbol file
Args:
- sym_file: The symbol file to upload
upload_url: The crash URL to POST the |sym_file| to
+ sym_item: A SymbolItem containing the path to the breakpad symbol to upload
"""
- sym_header = cros_generate_breakpad_symbols.ReadSymsHeader(sym_file)
+ sym_header = sym_item.sym_header
+ sym_file = sym_item.sym_file
fields = (
('code_file', sym_header.name),
@@ -130,9 +164,9 @@ def SymUpload(sym_file, upload_url):
urllib2.urlopen(request, timeout=UPLOAD_TIMEOUT)
-def TestingSymUpload(sym_file, upload_url):
+def TestingSymUpload(upload_url, sym_item):
"""A stub version of SymUpload for --testing usage"""
- cmd = ['sym_upload', sym_file, upload_url]
+ cmd = ['sym_upload', sym_item.sym_file, upload_url]
# Randomly fail 80% of the time (the retry logic makes this 80%/3 per file).
returncode = random.randint(1, 100) <= 80
cros_build_lib.Debug('would run (and return %i): %s', returncode,
@@ -193,23 +227,27 @@ def _UpdateCounter(counter, adj):
_Update()
-def UploadSymbol(sym_file, upload_url, file_limit=DEFAULT_FILE_LIMIT,
+def UploadSymbol(upload_url, sym_item, file_limit=DEFAULT_FILE_LIMIT,
sleep=0, num_errors=None, watermark_errors=None,
- failed_queue=None):
- """Upload |sym_file| to |upload_url|
+ failed_queue=None, passed_queue=None):
+ """Upload |sym_item| to |upload_url|
Args:
- sym_file: The full path to the breakpad symbol to upload
upload_url: The crash server to upload things to
+ sym_item: A SymbolItem containing the path to the breakpad symbol to upload
file_limit: The max file size of a symbol file before we try to strip it
sleep: Number of seconds to sleep before running
num_errors: An object to update with the error count (needs a .value member)
watermark_errors: An object to track current error behavior (needs a .value)
failed_queue: When a symbol fails, add it to this queue
+ passed_queue: When a symbol passes, add it to this queue
Returns:
The number of errors that were encountered.
"""
+ sym_file = sym_item.sym_file
+ upload_item = sym_item
+
if num_errors is None:
num_errors = ctypes.c_int()
if ErrorLimitHit(num_errors, watermark_errors):
@@ -218,8 +256,6 @@ def UploadSymbol(sym_file, upload_url, file_limit=DEFAULT_FILE_LIMIT,
failed_queue.put(sym_file)
return 0
- upload_file = sym_file
-
if sleep:
# Keeps us from DoS-ing the symbol server.
time.sleep(sleep)
@@ -240,11 +276,13 @@ def UploadSymbol(sym_file, upload_url, file_limit=DEFAULT_FILE_LIMIT,
sym_file, file_size, file_limit)
temp_sym_file.writelines([x for x in open(sym_file, 'rb').readlines()
if not x.startswith('STACK CFI')])
- upload_file = temp_sym_file.name
+
+ upload_item = FakeItem(sym_file=temp_sym_file.name,
+ sym_header=sym_item.sym_header)
# Hopefully the crash server will let it through. But it probably won't.
# Not sure what the best answer is in this case.
- file_size = os.path.getsize(upload_file)
+ file_size = os.path.getsize(upload_item.sym_file)
if file_size > CRASH_SERVER_FILE_LIMIT:
cros_build_lib.PrintBuildbotStepWarnings()
cros_build_lib.Warning('upload file %s is awfully large, risking '
@@ -257,10 +295,13 @@ def UploadSymbol(sym_file, upload_url, file_limit=DEFAULT_FILE_LIMIT,
cros_build_lib.TimedCommand(
retry_util.RetryException,
(urllib2.HTTPError, urllib2.URLError), MAX_RETRIES, SymUpload,
- upload_file, upload_url, sleep=INITIAL_RETRY_DELAY,
+ upload_url, upload_item, sleep=INITIAL_RETRY_DELAY,
timed_log_msg='upload of %10i bytes took %%s: %s' %
(file_size, os.path.basename(sym_file)))
success = True
+
+ if passed_queue:
+ passed_queue.put(sym_item)
except urllib2.HTTPError as e:
cros_build_lib.Warning('could not upload: %s: HTTP %s: %s',
os.path.basename(sym_file), e.code, e.reason)
@@ -279,6 +320,88 @@ def UploadSymbol(sym_file, upload_url, file_limit=DEFAULT_FILE_LIMIT,
return num_errors.value
+# A dummy class that allows for stubbing in tests and SymUpload.
+FakeItem = cros_build_lib.Collection(
+ 'FakeItem', sym_file=None, sym_header=None, content=lambda x: '')
+
+
+# TODO(build): Delete this if check. http://crbug.com/341152
+if isolateserver:
+ class SymbolItem(isolateserver.BufferItem):
+ """Turn a sym_file into an isolateserver.Item"""
+
+ ALGO = hashlib.sha1
+
+ def __init__(self, sym_file):
+ sym_header = cros_generate_breakpad_symbols.ReadSymsHeader(sym_file)
+ super(SymbolItem, self).__init__(str(sym_header), self.ALGO)
+ self.sym_header = sym_header
+ self.sym_file = sym_file
+
+
+def SymbolDeduplicatorNotify(dedupe_namespace, dedupe_queue):
+ """Send a symbol file to the swarming service
+
+ Notify the swarming service of a successful upload. If the notification fails
+ for any reason, we ignore it. We don't care as it just means we'll upload it
+ again later on, and the symbol server will handle that graciously.
+
+ This func runs in a different process from the main one, so we cannot share
+ the storage object. Instead, we create our own. This func stays alive for
+ the life of the process, so we only create one here overall.
+
+ Args:
+ dedupe_namespace: The isolateserver namespace to dedupe uploaded symbols.
+ dedupe_queue: The queue to read SymbolItems from
+ """
+ if dedupe_queue is None:
+ return
+
+ item = None
+ try:
+ storage = isolateserver.get_storage_api(constants.ISOLATESERVER,
+ dedupe_namespace)
+ for item in iter(dedupe_queue.get, None):
+ with timeout_util.Timeout(DEDUPE_TIMEOUT):
+ storage.push(item, item.content(0))
+ except Exception:
+ sym_file = item.sym_file if (item and item.sym_file) else ''
+ cros_build_lib.Warning('posting %s to dedupe server failed',
+ os.path.basename(sym_file), exc_info=True)
+
+
+def SymbolDeduplicator(storage, sym_paths):
+ """Filter out symbol files that we've already uploaded
+
+ Using the swarming service, ask it to tell us which symbol files we've already
+ uploaded in previous runs and/or by other bots. If the query fails for any
+ reason, we'll just upload all symbols. This is fine as the symbol server will
+ do the right thing and this phase is purely an optimization.
+
+ This code runs in the main thread which is why we can re-use the existing
+ storage object. Saves us from having to recreate one all the time.
+
+ Args:
+ storage: An isolateserver.StorageApi object
+ sym_paths: List of symbol files to check against the dedupe server
+
+ Returns:
+ List of symbol files that have not been uploaded before
+ """
+ if not sym_paths:
+ return sym_paths
+
+ items = [SymbolItem(x) for x in sym_paths]
+ if storage:
+ try:
+ with timeout_util.Timeout(DEDUPE_TIMEOUT):
+ items = storage.contains(items)
+ except Exception:
+ cros_build_lib.Warning('talking to dedupe server failed', exc_info=True)
+
+ return items
+
+
def SymbolFinder(paths):
"""Locate symbol files in |paths|
@@ -299,10 +422,29 @@ def SymbolFinder(paths):
yield p
+def WriteQueueToFile(listing, queue, relpath=None):
+ """Write all the items in |queue| to the |listing|.
+
+ Args:
+ listing: Where to write out the list of files.
+ queue: The queue of paths to drain.
+ relpath: If set, write out paths relative to this one.
+ """
+ if not listing:
+ return
+
+ with cros_build_lib.Open(listing, 'wb+') as f:
+ while not queue.empty():
+ path = queue.get()
+ if relpath:
+ path = os.path.relpath(path, relpath)
+ f.write('%s\n' % path)
+
+
def UploadSymbols(board=None, official=False, breakpad_dir=None,
file_limit=DEFAULT_FILE_LIMIT, sleep=DEFAULT_SLEEP_DELAY,
upload_limit=None, sym_paths=None, failed_list=None,
- root=None, retry=True):
+ root=None, retry=True, dedupe_namespace=None):
"""Upload all the generated symbols for |board| to the crash server
You can use in a few ways:
@@ -323,10 +465,14 @@ def UploadSymbols(board=None, official=False, breakpad_dir=None,
filename or file-like object.
root: The tree to prefix to |breakpad_dir| (if |breakpad_dir| is not set)
retry: Whether we should retry failures.
+ dedupe_namespace: The isolateserver namespace to dedupe uploaded symbols.
Returns:
The number of errors that were encountered.
"""
+ # TODO(build): Delete this assert.
+ assert isolateserver, 'Missing isolateserver import http://crbug.com/341152'
+
if official:
upload_url = OFFICIAL_UPLOAD_URL
else:
@@ -344,18 +490,55 @@ def UploadSymbols(board=None, official=False, breakpad_dir=None,
breakpad_dir)
sym_paths = [breakpad_dir]
+ # We use storage_query to ask the server about existing symbols. The
+ # storage_notify_proc process is used to post updates to the server. We
+ # cannot safely share the storage object between threads/processes, but
+ # we also want to minimize creating new ones as each object has to init
+ # new state (like server connections).
+ if dedupe_namespace:
+ dedupe_limit = DEDUPE_LIMIT
+ dedupe_queue = multiprocessing.Queue()
+ storage_query = isolateserver.get_storage_api(constants.ISOLATESERVER,
+ dedupe_namespace)
+ else:
+ dedupe_limit = 1
+ dedupe_queue = storage_query = None
+ # Can't use parallel.BackgroundTaskRunner because that'll create multiple
+ # processes and we want only one the whole time (see comment above).
+ storage_notify_proc = multiprocessing.Process(
+ target=SymbolDeduplicatorNotify, args=(dedupe_namespace, dedupe_queue))
+
bg_errors = multiprocessing.Value('i')
watermark_errors = multiprocessing.Value('f')
failed_queue = multiprocessing.Queue()
uploader = functools.partial(
- UploadSymbol, file_limit=file_limit, sleep=sleep, num_errors=bg_errors,
- watermark_errors=watermark_errors, failed_queue=failed_queue)
+ UploadSymbol, upload_url, file_limit=file_limit, sleep=sleep,
+ num_errors=bg_errors, watermark_errors=watermark_errors,
+ failed_queue=failed_queue, passed_queue=dedupe_queue)
start_time = datetime.datetime.now()
Counters = cros_build_lib.Collection(
- 'Counters', upload_limit=upload_limit, uploaded_count=0)
+ 'Counters', upload_limit=upload_limit, uploaded_count=0, deduped_count=0)
counters = Counters()
+ def _Upload(queue, counters, files):
+ if not files:
+ return
+
+ missing_count = 0
+ for item in SymbolDeduplicator(storage_query, files):
+ if counters.upload_limit == 0:
+ break
+
+ missing_count += 1
+ queue.put((item,))
+ counters.uploaded_count += 1
+ if counters.upload_limit is not None:
+ counters.upload_limit -= 1
+
+ counters.deduped_count += (len(files) - missing_count)
+
+ storage_notify_proc.start()
# For the first run, we collect the symbols that failed. If the
# overall failure rate was low, we'll retry them on the second run.
for retry in (retry, False):
@@ -364,17 +547,20 @@ def UploadSymbols(board=None, official=False, breakpad_dir=None,
# http://crbug.com/209442
# http://crbug.com/212496
with parallel.BackgroundTaskRunner(uploader, processes=1) as queue:
+ dedupe_list = []
for sym_file in SymbolFinder(sym_paths):
- if counters.upload_limit == 0:
- break
-
- queue.put([sym_file, upload_url])
- counters.uploaded_count += 1
-
- if counters.upload_limit is not None:
- counters.upload_limit -= 1
-
- # See if we need to retry, and if we haven't failed too many times already.
+ dedupe_list.append(sym_file)
+ dedupe_len = len(dedupe_list)
+ if dedupe_len < dedupe_limit:
+ if (counters.upload_limit is None or
+ dedupe_len < counters.upload_limit):
+ continue
+
+ _Upload(queue, counters, dedupe_list)
+ dedupe_list = []
+ _Upload(queue, counters, dedupe_list)
+
+ # See if we need to retry, and if we haven't failed too many times yet.
if not retry or ErrorLimitHit(bg_errors, watermark_errors):
break
@@ -386,30 +572,33 @@ def UploadSymbols(board=None, official=False, breakpad_dir=None,
if counters.upload_limit is not None:
counters.upload_limit += len(sym_paths)
# Decrement the error count in case we recover in the second pass.
- assert bg_errors.value >= len(sym_paths), 'more failed files than errors?'
+ assert bg_errors.value >= len(sym_paths), \
+ 'more failed files than errors?'
bg_errors.value -= len(sym_paths)
else:
# No failed symbols, so just return now.
break
# If the user has requested it, save all the symbol files that we failed to
- # upload to a listing file. This should help with recovery efforts later on.
- if failed_list:
- with cros_build_lib.Open(failed_list, 'wb+') as f:
- while not failed_queue.empty():
- path = failed_queue.get()
- if breakpad_dir:
- path = os.path.relpath(path, breakpad_dir)
- f.write('%s\n' % path)
-
- cros_build_lib.Info('uploaded %i symbols which took: %s',
- counters.uploaded_count,
+ # upload to a listing file. This should help with recovery efforts later.
+ WriteQueueToFile(failed_list, failed_queue, breakpad_dir)
+
+ if dedupe_queue:
+ dedupe_queue.put(None)
+ dedupe_queue.close()
+ storage_notify_proc.join()
+
+ cros_build_lib.Info('uploaded %i symbols (%i were deduped) which took: %s',
+ counters.uploaded_count, counters.deduped_count,
datetime.datetime.now() - start_time)
return bg_errors.value
def main(argv):
+ # TODO(build): Delete this assert.
+ assert isolateserver, 'Missing isolateserver import http://crbug.com/341152'
+
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('sym_paths', type='path', nargs='*', default=None)
@@ -428,6 +617,8 @@ def main(argv):
help='strip CFI data for files above this size')
parser.add_argument('--failed-list', type='path',
help='where to save a list of failed symbols')
+ parser.add_argument('--dedupe', action='store_true', default=False,
+ help='use the swarming service to avoid re-uploading')
parser.add_argument('--testing', action='store_true', default=False,
help='run in testing mode')
parser.add_argument('--yes', action='store_true', default=False,
@@ -454,6 +645,13 @@ def main(argv):
INITIAL_RETRY_DELAY = DEFAULT_SLEEP_DELAY = 0
SymUpload = TestingSymUpload
+ dedupe_namespace = None
+ if opts.dedupe:
+ if opts.official_build and not opts.testing:
+ dedupe_namespace = OFFICIAL_DEDUPE_NAMESPACE
+ else:
+ dedupe_namespace = STAGING_DEDUPE_NAMESPACE
+
if not opts.yes:
prolog = '\n'.join(textwrap.wrap(textwrap.dedent("""
Uploading symbols for an entire Chromium OS build is really only
@@ -476,7 +674,8 @@ def main(argv):
breakpad_dir=opts.breakpad_root,
file_limit=opts.strip_cfi, sleep=DEFAULT_SLEEP_DELAY,
upload_limit=opts.upload_limit, sym_paths=opts.sym_paths,
- failed_list=opts.failed_list)
+ failed_list=opts.failed_list,
+ dedupe_namespace=dedupe_namespace)
if ret:
cros_build_lib.Error('encountered %i problem(s)', ret)
# Since exit(status) gets masked, clamp it to 1 so we don't inadvertently
diff --git a/scripts/upload_symbols_unittest.py b/scripts/upload_symbols_unittest.py
index c650f0ecc..ddf4163a4 100755
--- a/scripts/upload_symbols_unittest.py
+++ b/scripts/upload_symbols_unittest.py
@@ -9,6 +9,7 @@ from __future__ import print_function
import ctypes
import logging
+import multiprocessing
import os
import sys
import urllib2
@@ -23,6 +24,11 @@ from chromite.lib import parallel_unittest
from chromite.scripts import cros_generate_breakpad_symbols
from chromite.scripts import upload_symbols
+# TODO(build): Finish test wrapper (http://crosbug.com/37517).
+# Until then, this has to be after the chromite imports.
+import isolateserver
+import mock
+
class UploadSymbolsTest(cros_test_lib.MockTempDirTestCase):
"""Tests for UploadSymbols()"""
@@ -49,9 +55,9 @@ class UploadSymbolsTest(cros_test_lib.MockTempDirTestCase):
self.assertEqual(ret, 0)
self.assertEqual(self.upload_mock.call_count, 3)
for call_args in self.upload_mock.call_args_list:
- sym_file, url = call_args[0]
+ url, sym_item = call_args[0]
self.assertEqual(url, expected_url)
- self.assertTrue(sym_file.endswith('.sym'))
+ self.assertTrue(sym_item.sym_file.endswith('.sym'))
def testOfficialUploadURL(self):
"""Verify we upload to the real crash server for official builds"""
@@ -85,7 +91,7 @@ class UploadSymbolsTest(cros_test_lib.MockTempDirTestCase):
def testFailedFileList(self):
"""Verify the failed file list is populated with the right content"""
def UploadSymbol(*args, **kwargs):
- kwargs['failed_queue'].put(args[0])
+ kwargs['failed_queue'].put(args[1].sym_file)
kwargs['num_errors'].value = 4
self.upload_mock.side_effect = UploadSymbol
with parallel_unittest.ParallelMock():
@@ -104,27 +110,80 @@ class UploadSymbolsTest(cros_test_lib.MockTempDirTestCase):
self.assertEquals(exp_list, got_list)
+class SymbolDeduplicatorNotifyTest(cros_test_lib.MockTestCase):
+ """Tests for SymbolDeduplicatorNotify()"""
+
+ def setUp(self):
+ self.storage_mock = self.PatchObject(isolateserver, 'get_storage_api')
+
+ def testSmoke(self):
+ """Basic run through the system."""
+ q = mock.MagicMock()
+ q.get.side_effect = (upload_symbols.FakeItem(), None,)
+ upload_symbols.SymbolDeduplicatorNotify('name', q)
+
+ def testStorageException(self):
+ """We want to just warn & move on when dedupe server fails"""
+ log_mock = self.PatchObject(cros_build_lib, 'Warning')
+ q = mock.MagicMock()
+ q.get.side_effect = (upload_symbols.FakeItem(), None,)
+ self.storage_mock.side_effect = Exception
+ upload_symbols.SymbolDeduplicatorNotify('name', q)
+ self.assertEqual(log_mock.call_count, 1)
+
+
+class SymbolDeduplicatorTest(cros_test_lib.MockTestCase):
+ """Tests for SymbolDeduplicator()"""
+
+ def setUp(self):
+ self.storage_mock = mock.MagicMock()
+ self.header_mock = self.PatchObject(
+ cros_generate_breakpad_symbols, 'ReadSymsHeader',
+ return_value=cros_generate_breakpad_symbols.SymbolHeader(
+ os='os', cpu='cpu', id='id', name='name'))
+
+ def testNoStorageOrPaths(self):
+ """We don't want to talk to the server if there's no storage or files"""
+ upload_symbols.SymbolDeduplicator(None, [])
+ upload_symbols.SymbolDeduplicator(self.storage_mock, [])
+ self.assertEqual(self.storage_mock.call_count, 0)
+ self.assertEqual(self.header_mock.call_count, 0)
+
+ def testStorageException(self):
+ """We want to just warn & move on when dedupe server fails"""
+ log_mock = self.PatchObject(cros_build_lib, 'Warning')
+ self.storage_mock.contains.side_effect = Exception('storage error')
+ sym_paths = ['/a', '/bbbbbb', '/cc.c']
+ ret = upload_symbols.SymbolDeduplicator(self.storage_mock, sym_paths)
+ self.assertEqual(log_mock.call_count, 1)
+ self.assertEqual(self.storage_mock.contains.call_count, 1)
+ self.assertEqual(self.header_mock.call_count, len(sym_paths))
+ self.assertEqual(len(ret), len(sym_paths))
+
+
class UploadSymbolTest(cros_test_lib.MockTempDirTestCase):
"""Tests for UploadSymbol()"""
def setUp(self):
self.sym_file = os.path.join(self.tempdir, 'foo.sym')
+ self.sym_item = upload_symbols.FakeItem(sym_file=self.sym_file)
self.url = 'http://eatit'
self.upload_mock = self.PatchObject(upload_symbols, 'SymUpload')
def testUploadSymbolNormal(self):
"""Verify we try to upload on a normal file"""
osutils.Touch(self.sym_file)
- ret = upload_symbols.UploadSymbol(self.sym_file, self.url)
+ ret = upload_symbols.UploadSymbol(self.url, self.sym_item)
self.assertEqual(ret, 0)
- self.upload_mock.assert_called_with(self.sym_file, self.url)
+ self.upload_mock.assert_called_with(self.url, self.sym_item)
self.assertEqual(self.upload_mock.call_count, 1)
def testUploadSymbolErrorCountExceeded(self):
"""Verify that when the error count gets too high, we stop uploading"""
errors = ctypes.c_int(10000)
# Pass in garbage values so that we crash if num_errors isn't handled.
- ret = upload_symbols.UploadSymbol(None, None, sleep=None, num_errors=errors)
+ ret = upload_symbols.UploadSymbol(None, self.sym_item, sleep=None,
+ num_errors=errors)
self.assertEqual(ret, 0)
def testUploadRetryErrors(self, side_effect=None):
@@ -133,9 +192,10 @@ class UploadSymbolTest(cros_test_lib.MockTempDirTestCase):
side_effect = urllib2.HTTPError('http://', 400, 'fail', {}, None)
self.upload_mock.side_effect = side_effect
errors = ctypes.c_int()
- ret = upload_symbols.UploadSymbol('/dev/null', self.url, num_errors=errors)
+ item = upload_symbols.FakeItem(sym_file='/dev/null')
+ ret = upload_symbols.UploadSymbol(self.url, item, num_errors=errors)
self.assertEqual(ret, 1)
- self.upload_mock.assert_called_with('/dev/null', self.url)
+ self.upload_mock.assert_called_with(self.url, item)
self.assertTrue(self.upload_mock.call_count >= upload_symbols.MAX_RETRIES)
def testConnectRetryErrors(self):
@@ -145,8 +205,8 @@ class UploadSymbolTest(cros_test_lib.MockTempDirTestCase):
def testTruncateTooBigFiles(self):
"""Verify we shrink big files"""
- def SymUpload(sym_file, _url):
- content = osutils.ReadFile(sym_file)
+ def SymUpload(_url, sym_item):
+ content = osutils.ReadFile(sym_item.sym_file)
self.assertEqual(content, 'some junk\n')
self.upload_mock.upload_mock.side_effect = SymUpload
content = '\n'.join((
@@ -155,9 +215,12 @@ class UploadSymbolTest(cros_test_lib.MockTempDirTestCase):
'STACK CFI 1234',
))
osutils.WriteFile(self.sym_file, content)
- ret = upload_symbols.UploadSymbol(self.sym_file, self.url, file_limit=1)
+ ret = upload_symbols.UploadSymbol(self.url, self.sym_item, file_limit=1)
self.assertEqual(ret, 0)
- self.assertNotEqual(self.upload_mock.call_args[0][1], self.sym_file)
+ # Make sure the item passed to the upload has a temp file and not the
+ # original -- only the temp one has been stripped down.
+ temp_item = self.upload_mock.call_args[0][1]
+ self.assertNotEqual(temp_item.sym_file, self.sym_item.sym_file)
self.assertEqual(self.upload_mock.call_count, 1)
def testTruncateReallyLargeFiles(self):
@@ -167,9 +230,12 @@ class UploadSymbolTest(cros_test_lib.MockTempDirTestCase):
f.truncate(upload_symbols.CRASH_SERVER_FILE_LIMIT + 100)
f.seek(0)
f.write('STACK CFI 1234\n\n')
- ret = upload_symbols.UploadSymbol(self.sym_file, self.url)
+ ret = upload_symbols.UploadSymbol(self.url, self.sym_item)
self.assertEqual(ret, 0)
- self.assertNotEqual(self.upload_mock.call_args[0][1], self.sym_file)
+ # Make sure the item passed to the upload has a temp file and not the
+ # original -- only the temp one has been truncated.
+ temp_item = self.upload_mock.call_args[0][1]
+ self.assertNotEqual(temp_item.sym_file, self.sym_item.sym_file)
self.assertEqual(self.upload_mock.call_count, 1)
self.assertEqual(warn_mock.call_count, 1)
@@ -184,11 +250,12 @@ PUBLIC 1471 0 main"""
def setUp(self):
self.sym_file = os.path.join(self.tempdir, 'test.sym')
osutils.WriteFile(self.sym_file, self.SYM_CONTENTS)
+ self.sym_item = upload_symbols.SymbolItem(self.sym_file)
def testPostUpload(self):
"""Verify HTTP POST has all the fields we need"""
m = self.PatchObject(urllib2, 'urlopen', autospec=True)
- upload_symbols.SymUpload(self.sym_file, self.SYM_URL)
+ upload_symbols.SymUpload(self.SYM_URL, self.sym_item)
self.assertEquals(m.call_count, 1)
req = m.call_args[0][0]
self.assertEquals(req.get_full_url(), self.SYM_URL)
@@ -212,6 +279,28 @@ PUBLIC 1471 0 main"""
self.assertTrue(self.SYM_CONTENTS in data)
+class UtilTest(cros_test_lib.TempDirTestCase):
+ """Various tests for utility funcs."""
+
+ def testWriteQueueToFile(self):
+ """Basic test for WriteQueueToFile."""
+ listing = os.path.join(self.tempdir, 'list')
+ exp_list = [
+ 'b/c.txt',
+ 'foo.log',
+ 'there/might/be/giants',
+ ]
+ relpath = '/a'
+
+ q = multiprocessing.Queue()
+ for f in exp_list:
+ q.put(os.path.join(relpath, f))
+ upload_symbols.WriteQueueToFile(listing, q, '/a')
+
+ got_list = osutils.ReadFile(listing).splitlines()
+ self.assertEquals(exp_list, got_list)
+
+
if __name__ == '__main__':
# pylint: disable=W0212
# Set timeouts small so that if the unit test hangs, it won't hang for long.
diff --git a/third_party/.gitignore b/third_party/.gitignore
index 44aea14cc..0120cb60f 100644
--- a/third_party/.gitignore
+++ b/third_party/.gitignore
@@ -1 +1,2 @@
/pyelftools
+/swarming.client