1 # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
5 """Upload all debug symbols required for crash reporting purposes.
7 This script need only be used to upload release builds symbols or to debug
8 crashes on non-release builds (in which case try to only upload the symbols
9 for those executables involved).
12 from __future__ import print_function
20 import multiprocessing
26 # Python-3 renamed to "queue". We still use Queue to avoid collisions
27 # with naming variables as "queue". Maybe we'll transition at some point.
28 # pylint: disable=F0401
39 from chromite.cbuildbot import constants
40 from chromite.lib import cache
41 from chromite.lib import commandline
42 from chromite.lib import cros_build_lib
43 from chromite.lib import gs
44 from chromite.lib import osutils
45 from chromite.lib import parallel
46 from chromite.lib import retry_util
47 from chromite.lib import signals
48 from chromite.lib import timeout_util
49 from chromite.scripts import cros_generate_breakpad_symbols
51 # Needs to be after chromite imports.
52 # TODO(build): When doing the initial buildbot bootstrap, we won't have any
53 # other repos available. So ignore isolateserver imports. But buildbot will
54 # re-exec itself once it has done a full repo sync and then the module will
55 # be available -- it isn't needed that early. http://crbug.com/341152
# NOTE(review): this listing has elided lines (the embedded numbering skips);
# the definitions of DEDUPE_LIMIT (used below by DEDUPE_TIMEOUT) and the
# MAX_RETRIES constant implied by the "7 attempts" comment are not visible
# here -- confirm against the original file.
62 # URLs used for uploading symbols.
63 OFFICIAL_UPLOAD_URL = 'http://clients2.google.com/cr/symbol'
64 STAGING_UPLOAD_URL = 'http://clients2.google.com/cr/staging_symbol'
67 # The crash server rejects files that are this big.
68 CRASH_SERVER_FILE_LIMIT = 350 * 1024 * 1024
69 # Give ourselves a little breathing room from what the server expects.
70 DEFAULT_FILE_LIMIT = CRASH_SERVER_FILE_LIMIT - (10 * 1024 * 1024)
73 # The batch limit when talking to the dedup server. We avoid sending one at a
74 # time as the round trip overhead will dominate. Conversely, we avoid sending
75 # all at once so we can start uploading symbols asap -- the symbol server is a
76 # bit slow and will take longer than anything else.
77 # TODO: A better algorithm would be adaptive. If we have more than one symbol
78 # in the upload queue waiting, we could send more symbols to the dedupe server
82 # How long to wait for the server to respond with the results. Note that the
83 # larger the limit above, the larger this will need to be. So we give it ~1
84 # second per item max.
85 DEDUPE_TIMEOUT = DEDUPE_LIMIT
87 # How long to wait for the notification to finish (in minutes). If it takes
88 # longer than this, we'll stop notifying, but that's not a big deal as we
89 # will be able to recover in later runs.
90 DEDUPE_NOTIFY_TIMEOUT = 20
92 # The unique namespace in the dedupe server that only we use. Helps avoid
93 # collisions with all the hashed values and unrelated content.
94 OFFICIAL_DEDUPE_NAMESPACE = 'chromium-os-upload-symbols'
95 STAGING_DEDUPE_NAMESPACE = '%s-staging' % OFFICIAL_DEDUPE_NAMESPACE
98 # The minimum average rate (in bytes per second) that we expect to maintain
99 # when uploading symbols. This has to allow for symbols that are up to
100 # CRASH_SERVER_FILE_LIMIT in size.
101 UPLOAD_MIN_RATE = CRASH_SERVER_FILE_LIMIT / (30 * 60)
103 # The lowest timeout (in seconds) we'll allow. If the server is overloaded,
104 # then there might be a delay in setting up the connection, not just with the
105 # transfer. So even a small file might need a larger value.
106 UPLOAD_MIN_TIMEOUT = 2 * 60
109 # Sleep for 200ms in between uploads to avoid DoS'ing symbol server.
110 DEFAULT_SLEEP_DELAY = 0.2
113 # Number of seconds to wait before retrying an upload. The delay will double
114 # for each subsequent retry of the same symbol file.
115 INITIAL_RETRY_DELAY = 1
117 # Allow up to 7 attempts to upload a symbol file (total delay may be
118 # 1+2+4+8+16+32=63 seconds).
121 # Number of total errors, before uploads are no longer attempted.
122 # This is used to avoid lots of errors causing unreasonable delays.
123 # See the related, but independent, error values below.
124 MAX_TOTAL_ERRORS_FOR_RETRY = 30
126 # A watermark of transient errors which we allow recovery from. If we hit
127 # errors infrequently, overall we're probably doing fine. For example, if
128 # we have one failure every 100 passes, then we probably don't want to fail
129 # right away. But if we hit a string of failures in a row, we want to abort.
131 # The watermark starts at 0 (and can never go below that). When this error
132 # level is exceeded, we stop uploading. When a failure happens, we add the
133 # fail adjustment, and when an upload succeeds, we add the pass adjustment.
134 # We want to penalize failures more so that we ramp up when there is a string
135 # of them, but then slowly back off as things start working.
# Worked example of the watermark evolving as uploads pass/fail:
138 # 0.0: Starting point.
139 # 0.0: Upload works, so add -0.5, and then clamp to 0.
140 # 1.0: Upload fails, so add 1.0.
141 # 2.0: Upload fails, so add 1.0.
142 # 1.5: Upload works, so add -0.5.
143 # 1.0: Upload works, so add -0.5.
144 ERROR_WATERMARK = 3.0
145 ERROR_ADJUST_FAIL = 1.0
146 ERROR_ADJUST_PASS = -0.5
149 def SymUpload(upload_url, sym_item):
150 """Upload a symbol file to a HTTP server
152 The upload is a multipart/form-data POST with the following parameters:
153 code_file: the basename of the module, e.g. "app"
154 code_identifier: the module file's identifier
155 debug_file: the basename of the debugging file, e.g. "app"
156 debug_identifier: the debug file's identifier, usually consisting of
157 the guid and age embedded in the pdb, e.g.
158 "11111111BBBB3333DDDD555555555555F"
159 version: the file version of the module, e.g. "1.2.3.4"
160 product: HTTP-friendly product name
161 os: the operating system that the module was built for
162 cpu: the CPU that the module was built for
163 symbol_file: the contents of the breakpad-format symbol file
166 upload_url: The crash URL to POST the |sym_file| to
167 sym_item: A SymbolItem containing the path to the breakpad symbol to upload
# The header carries the breakpad metadata (name/id/os/cpu) parsed from the
# first line of the .sym file; the file itself is streamed as the payload.
169 sym_header = sym_item.sym_header
170 sym_file = sym_item.sym_file
# NOTE(review): the opener of the |fields| sequence (original lines 171-172)
# is elided from this listing -- the tuples below are its elements.
173 ('code_file', sym_header.name),
174 ('debug_file', sym_header.name),
# Breakpad ids are hex with dashes; the server expects them stripped.
175 ('debug_identifier', sym_header.id.replace('-', '')),
176 # The product/version fields are used by the server only for statistic
177 # purposes. They do not impact symbolization, so they're safe to set
178 # to any value all the time.
179 # In this case, we use it to help see the load our build system is
180 # placing on the server.
181 # Not sure what to set for the version. Maybe the git sha1 of this file.
182 # Note: the server restricts this to 30 chars.
184 ('product', 'ChromeOS'),
185 ('os', sym_header.os),
186 ('cpu', sym_header.cpu),
# from_file streams the symbol file rather than loading it into memory.
187 poster.encode.MultipartParam.from_file('symbol_file', sym_file),
190 # Scale the timeout based on the filesize.
# Larger files get proportionally more time (bytes / UPLOAD_MIN_RATE), but
# never less than UPLOAD_MIN_TIMEOUT so tiny files survive a slow server.
191 timeout = max(os.path.getsize(sym_file) / UPLOAD_MIN_RATE, UPLOAD_MIN_TIMEOUT)
193 data, headers = poster.encode.multipart_encode(fields)
194 request = urllib2.Request(upload_url, data, headers)
195 request.add_header('User-agent', 'chromite.upload_symbols')
# Blocking POST; HTTP/URL errors propagate to the caller's retry logic.
196 urllib2.urlopen(request, timeout=timeout)
199 def TestingSymUpload(upload_url, sym_item):
200 """A stub version of SymUpload for --testing usage"""
201 cmd = ['sym_upload', sym_item.sym_file, upload_url]
202 # Randomly fail 80% of the time (the retry logic makes this 80%/3 per file).
203 returncode = random.randint(1, 100) <= 80
204 cros_build_lib.Debug('would run (and return %i): %s', returncode,
205 cros_build_lib.CmdToStr(cmd))
# NOTE(review): the if/else guard lines selecting between these two outputs
# (original lines 206/208) are elided from this listing.
207 output = 'Failed to send the symbol file.'
209 output = 'Successfully sent the symbol file.'
210 result = cros_build_lib.CommandResult(cmd=cmd, error=None, output=output,
211 returncode=returncode)
# NOTE(review): the guard and the opener of the |exceptions| sequence
# (original lines 212-213) are elided; these are simulated transient
# failures matching what the real SymUpload can raise.
214 socket.error('[socket.error] forced test fail'),
215 httplib.BadStatusLine('[BadStatusLine] forced test fail'),
216 urllib2.HTTPError(upload_url, 400, '[HTTPError] forced test fail',
218 urllib2.URLError('[URLError] forced test fail'),
# Raise one of the simulated failures at random to exercise retry handling.
220 raise random.choice(exceptions)
def ErrorLimitHit(num_errors, watermark_errors):
  """Check whether uploads should be abandoned due to excessive failures.

  Two independent limits are consulted: the raw failure count and the
  sliding "watermark" failure rate. Exceeding either one means we should
  stop attempting uploads. A None counter skips that particular check.

  Args:
    num_errors: A multiprocessing.Value of the raw number of failures,
      or None.
    watermark_errors: A multiprocessing.Value of the current rate of
      failures, or None.

  Returns:
    True if either error limit has been exceeded.
  """
  too_many_total = (num_errors is not None and
                    num_errors.value > MAX_TOTAL_ERRORS_FOR_RETRY)
  too_many_recent = (watermark_errors is not None and
                     watermark_errors.value > ERROR_WATERMARK)
  return too_many_total or too_many_recent
241 def _UpdateCounter(counter, adj):
242 """Update |counter| by |adj|
244 Handle atomic updates of |counter|. Also make sure it does not
248 counter: A multiprocessing.Value to update
249 adj: The value to add to |counter|
# NOTE(review): original lines 250-251 are elided here; from the structure
# below, they presumably define a nested helper (e.g. "def _Update():")
# whose body is the next two lines -- confirm against the original file.
# Clamp matches the numeric type of |adj| (int vs float) so the counter
# never goes below zero.
252 clamp = 0 if type(adj) is int else 0.0
253 counter.value = max(clamp, counter.value + adj)
# Take the lock when the Value provides one so the read-modify-write above
# is atomic across processes; otherwise fall back to an unlocked update.
255 if hasattr(counter, 'get_lock'):
256 with counter.get_lock():
258 elif counter is not None:
262 def UploadSymbol(upload_url, sym_item, file_limit=DEFAULT_FILE_LIMIT,
263 sleep=0, num_errors=None, watermark_errors=None,
264 failed_queue=None, passed_queue=None):
265 """Upload |sym_item| to |upload_url|
268 upload_url: The crash server to upload things to
269 sym_item: A SymbolItem containing the path to the breakpad symbol to upload
270 file_limit: The max file size of a symbol file before we try to strip it
271 sleep: Number of seconds to sleep before running
272 num_errors: An object to update with the error count (needs a .value member)
273 watermark_errors: An object to track current error behavior (needs a .value)
274 failed_queue: When a symbol fails, add it to this queue
275 passed_queue: When a symbol passes, add it to this queue
278 The number of errors that were encountered.
# NOTE(review): this listing elides several lines (the embedded numbering
# skips); the try/except/finally scaffolding around the upload is only
# partially visible below -- confirm flow against the original file.
280 sym_file = sym_item.sym_file
281 upload_item = sym_item
# Use a local dummy counter when the caller didn't supply a shared one.
283 if num_errors is None:
284 num_errors = ctypes.c_int()
285 if ErrorLimitHit(num_errors, watermark_errors):
286 # Abandon ship! It's on fire! NOoooooooooooOOOoooooo.
# Record the skipped file so a later pass (or --failed-list) can pick it up.
288 failed_queue.put(sym_file)
292 # Keeps us from DoS-ing the symbol server.
295 cros_build_lib.Debug('uploading %s' % sym_file)
297 # Ideally there'd be a tempfile.SpooledNamedTemporaryFile that we could use.
298 with tempfile.NamedTemporaryFile(prefix='upload_symbols',
299 bufsize=0) as temp_sym_file:
301 # If the symbols size is too big, strip out the call frame info. The CFI
302 # is unnecessary for 32bit x86 targets where the frame pointer is used (as
303 # all of ours have) and it accounts for over half the size of the symbols
305 file_size = os.path.getsize(sym_file)
306 if file_size > file_limit:
307 cros_build_lib.Warning('stripping CFI from %s due to size %s > %s',
308 sym_file, file_size, file_limit)
# Drop every "STACK CFI" record; everything else is copied through.
309 temp_sym_file.writelines([x for x in open(sym_file, 'rb').readlines()
310 if not x.startswith('STACK CFI')])
# Re-wrap the stripped temp file with the original header for upload.
312 upload_item = FakeItem(sym_file=temp_sym_file.name,
313 sym_header=sym_item.sym_header)
315 # Hopefully the crash server will let it through. But it probably won't.
316 # Not sure what the best answer is in this case.
317 file_size = os.path.getsize(upload_item.sym_file)
318 if file_size > CRASH_SERVER_FILE_LIMIT:
319 cros_build_lib.PrintBuildbotStepWarnings()
320 cros_build_lib.Warning('upload file %s is awfully large, risking '
321 'rejection by the symbol server (%s > %s)',
322 sym_file, file_size, CRASH_SERVER_FILE_LIMIT)
324 # Upload the symbol file.
# Retry transient HTTP/URL errors with exponential backoff, timing the
# overall upload for the log.
327 cros_build_lib.TimedCommand(
328 retry_util.RetryException,
329 (urllib2.HTTPError, urllib2.URLError), MAX_RETRIES, SymUpload,
330 upload_url, upload_item, sleep=INITIAL_RETRY_DELAY,
331 timed_log_msg='upload of %10i bytes took %%s: %s' %
332 (file_size, os.path.basename(sym_file)))
# On success, notify the dedupe machinery so future runs can skip this file.
336 passed_queue.put(sym_item)
337 except urllib2.HTTPError as e:
338 cros_build_lib.Warning('could not upload: %s: HTTP %s: %s',
339 os.path.basename(sym_file), e.code, e.reason)
340 except (urllib2.URLError, httplib.HTTPException, socket.error) as e:
341 cros_build_lib.Warning('could not upload: %s: %s',
342 os.path.basename(sym_file), e)
# Success path: relax the watermark. Failure path: bump both counters and
# queue the file for a retry pass.
345 _UpdateCounter(watermark_errors, ERROR_ADJUST_PASS)
347 _UpdateCounter(num_errors, 1)
348 _UpdateCounter(watermark_errors, ERROR_ADJUST_FAIL)
350 failed_queue.put(sym_file)
352 return num_errors.value
355 # A dummy class that allows for stubbing in tests and SymUpload.
# Provides the same attribute surface SymUpload reads (sym_file, sym_header)
# plus a no-op content callable, without requiring the isolateserver module.
356 FakeItem = cros_build_lib.Collection(
357 'FakeItem', sym_file=None, sym_header=None, content=lambda x: '')
360 # TODO(build): Delete this if check. http://crbug.com/341152
# NOTE(review): the guarding "if" line referenced by the TODO (original line
# 361, presumably "if isolateserver:") and the class-level ALGO attribute
# used below are elided from this listing -- confirm against the original.
362 class SymbolItem(isolateserver.BufferItem):
363 """Turn a sym_file into an isolateserver.Item"""
367 def __init__(self, sym_file):
# Parse the breakpad header (name/id/os/cpu) from the symbol file and use
# its string form as the item's buffer content for hashing/dedupe.
368 sym_header = cros_generate_breakpad_symbols.ReadSymsHeader(sym_file)
369 super(SymbolItem, self).__init__(str(sym_header), self.ALGO)
370 self.sym_header = sym_header
371 self.sym_file = sym_file
374 def SymbolDeduplicatorNotify(dedupe_namespace, dedupe_queue):
375 """Send a symbol file to the swarming service
377 Notify the swarming service of a successful upload. If the notification fails
378 for any reason, we ignore it. We don't care as it just means we'll upload it
379 again later on, and the symbol server will handle that graciously.
381 This func runs in a different process from the main one, so we cannot share
382 the storage object. Instead, we create our own. This func stays alive for
383 the life of the process, so we only create one here overall.
386 dedupe_namespace: The isolateserver namespace to dedupe uploaded symbols.
387 dedupe_queue: The queue to read SymbolItems from
# Nothing to do when deduping is disabled.
389 if dedupe_queue is None:
# NOTE(review): the try/except wrapping of this loop is elided from this
# listing (the embedded numbering skips) -- confirm against the original.
394 with timeout_util.Timeout(DEDUPE_TIMEOUT):
395 storage = isolateserver.get_storage_api(constants.ISOLATESERVER,
# Drain items until the None sentinel arrives, pushing each to the server.
397 for item in iter(dedupe_queue.get, None):
398 with timeout_util.Timeout(DEDUPE_TIMEOUT):
399 cros_build_lib.Debug('sending %s to dedupe server', item.sym_file)
400 storage.push(item, item.content(0))
401 cros_build_lib.Debug('sent %s', item.sym_file)
402 cros_build_lib.Info('dedupe notification finished; exiting')
# Failure path: log (best effort) and keep draining so the producer side
# of the queue doesn't block on a full pipe.
404 sym_file = item.sym_file if (item and item.sym_file) else ''
405 cros_build_lib.Warning('posting %s to dedupe server failed',
406 os.path.basename(sym_file), exc_info=True)
408 # Keep draining the queue though so it doesn't fill up.
409 while dedupe_queue.get() is not None:
413 def SymbolDeduplicator(storage, sym_paths):
414 """Filter out symbol files that we've already uploaded
416 Using the swarming service, ask it to tell us which symbol files we've already
417 uploaded in previous runs and/or by other bots. If the query fails for any
418 reason, we'll just upload all symbols. This is fine as the symbol server will
419 do the right thing and this phase is purely an optimization.
421 This code runs in the main thread which is why we can re-use the existing
422 storage object. Saves us from having to recreate one all the time.
425 storage: An isolateserver.StorageApi object
426 sym_paths: List of symbol files to check against the dedupe server
429 List of symbol files that have not been uploaded before
# NOTE(review): guard/return lines and the try/except framing (embedded
# numbering skips 430-433, 435-436, 439) are elided from this listing.
434 items = [SymbolItem(x) for x in sym_paths]
437 with timeout_util.Timeout(DEDUPE_TIMEOUT):
# contains() returns only the items the server does NOT already have.
438 items = storage.contains(items)
# Best effort: on failure we fall back to uploading everything.
440 cros_build_lib.Warning('talking to dedupe server failed', exc_info=True)
446 """Guess if this is a tarball based on the filename."""
447 parts = path.split('.')
451 if parts[-1] == 'tar':
454 if parts[-2] == 'tar':
455 return parts[-1] in ('bz2', 'gz', 'xz')
457 return parts[-1] in ('tbz2', 'tbz', 'tgz', 'txz')
460 def SymbolFinder(tempdir, paths):
461 """Locate symbol files in |paths|
464 tempdir: Path to use for temporary files (caller will clean up).
465 paths: A list of input paths to walk. Files are returned w/out any checks.
466 Dirs are searched for files that end in ".sym". Urls are fetched and then
467 processed. Tarballs are unpacked and walked.
470 Yield every viable sym file.
# NOTE(review): the for-loop header over |paths| and several branch guards
# are elided from this listing (the embedded numbering skips).
473 # Pylint is confused about members of ParseResult.
475 o = urlparse.urlparse(p)
# URL branch: fetch the remote file (via a shared tarball cache) and then
# recurse over the downloaded copy.
476 if o.scheme: # pylint: disable=E1101
477 # Support globs of filenames.
480 cros_build_lib.Info('processing files inside %s', p)
481 o = urlparse.urlparse(p)
482 cache_dir = commandline.GetCacheDir()
483 common_path = os.path.join(cache_dir, constants.COMMON_CACHE)
484 tar_cache = cache.TarballCache(common_path)
485 key = ('%s%s' % (o.netloc, o.path)).split('/') # pylint: disable=E1101
486 # The common cache will not be LRU, removing the need to hold a read
487 # lock on the cached gsutil.
488 ref = tar_cache.Lookup(key)
491 except cros_build_lib.RunCommandError as e:
492 cros_build_lib.Warning('ignoring %s\n%s', p, e)
494 for p in SymbolFinder(tempdir, [ref.path]):
# Directory branch: walk recursively, yielding only *.sym files.
497 elif os.path.isdir(p):
498 for root, _, files in os.walk(p):
500 if f.endswith('.sym'):
501 yield os.path.join(root, f)
# Tarball branch: unpack into a fresh temp subdir and recurse over it.
504 cros_build_lib.Info('processing files inside %s', p)
505 tardir = tempfile.mkdtemp(dir=tempdir)
506 cache.Untar(os.path.realpath(p), tardir)
507 for p in SymbolFinder(tardir, [tardir]):
514 def WriteQueueToFile(listing, queue, relpath=None):
515 """Write all the items in |queue| to the |listing|.
517 Note: The queue must have a sentinel None appended to the end.
520 listing: Where to write out the list of files.
521 queue: The queue of paths to drain.
522 relpath: If set, write out paths relative to this one.
# NOTE(review): the guard for the no-listing case and the drain loop over
# queued paths are partially elided from this listing.
525 # Still drain the queue so we make sure the producer has finished
526 # before we return. Otherwise, the queue might get destroyed too
527 # quickly which will trigger a traceback in the producer.
528 while queue.get() is not None:
# Write one path per line, relativized when |relpath| is given.
532 with cros_build_lib.Open(listing, 'wb+') as f:
538 path = os.path.relpath(path, relpath)
539 f.write('%s\n' % path)
542 def UploadSymbols(board=None, official=False, server=None, breakpad_dir=None,
543 file_limit=DEFAULT_FILE_LIMIT, sleep=DEFAULT_SLEEP_DELAY,
544 upload_limit=None, sym_paths=None, failed_list=None,
545 root=None, retry=True, dedupe_namespace=None):
546 """Upload all the generated symbols for |board| to the crash server
548 You can use in a few ways:
549 * pass |board| to locate all of its symbols
550 * pass |breakpad_dir| to upload all the symbols in there
551 * pass |sym_paths| to upload specific symbols (or dirs of symbols)
554 board: The board whose symbols we wish to upload
555 official: Use the official symbol server rather than the staging one
556 server: Explicit server to post symbols to
557 breakpad_dir: The full path to the breakpad directory where symbols live
558 file_limit: The max file size of a symbol file before we try to strip it
559 sleep: How long to sleep in between uploads
560 upload_limit: If set, only upload this many symbols (meant for testing)
561 sym_paths: Specific symbol files (or dirs of sym files) to upload,
562 otherwise search |breakpad_dir|
563 failed_list: Write the names of all sym files we did not upload; can be a
564 filename or file-like object.
565 root: The tree to prefix to |breakpad_dir| (if |breakpad_dir| is not set)
566 retry: Whether we should retry failures.
567 dedupe_namespace: The isolateserver namespace to dedupe uploaded symbols.
570 The number of errors that were encountered.
# NOTE(review): this listing elides many lines (the embedded numbering
# skips); several if/else/try guards below are only partially visible --
# confirm control flow against the original file.
572 # TODO(build): Delete this assert.
573 assert isolateserver, 'Missing isolateserver import http://crbug.com/341152'
# Select the upload endpoint: official server, or the staging server for
# unofficial builds (an explicit |server| presumably overrides -- the
# guard lines are elided here).
577 upload_url = OFFICIAL_UPLOAD_URL
579 cros_build_lib.Warning('unofficial builds upload to the staging server')
580 upload_url = STAGING_UPLOAD_URL
585 cros_build_lib.Info('uploading specified symbols to %s', upload_url)
587 if breakpad_dir is None:
588 breakpad_dir = os.path.join(
590 cros_generate_breakpad_symbols.FindBreakpadDir(board).lstrip('/'))
591 cros_build_lib.Info('uploading all symbols to %s from %s', upload_url,
593 sym_paths = [breakpad_dir]
595 # We use storage_query to ask the server about existing symbols. The
596 # storage_notify_proc process is used to post updates to the server. We
597 # cannot safely share the storage object between threads/processes, but
598 # we also want to minimize creating new ones as each object has to init
599 # new state (like server connections).
602 dedupe_limit = DEDUPE_LIMIT
603 dedupe_queue = multiprocessing.Queue()
605 with timeout_util.Timeout(DEDUPE_TIMEOUT):
606 storage_query = isolateserver.get_storage_api(constants.ISOLATESERVER,
# Dedupe is best effort: a failed connection just means no deduping.
609 cros_build_lib.Warning('initializing dedupe server connection failed',
614 # Can't use parallel.BackgroundTaskRunner because that'll create multiple
615 # processes and we want only one the whole time (see comment above).
616 storage_notify_proc = multiprocessing.Process(
617 target=SymbolDeduplicatorNotify, args=(dedupe_namespace, dedupe_queue))
# Shared counters: 'i' int for raw errors, 'f' float for the watermark.
619 bg_errors = multiprocessing.Value('i')
620 watermark_errors = multiprocessing.Value('f')
621 failed_queue = multiprocessing.Queue()
622 uploader = functools.partial(
623 UploadSymbol, upload_url, file_limit=file_limit, sleep=sleep,
624 num_errors=bg_errors, watermark_errors=watermark_errors,
625 failed_queue=failed_queue, passed_queue=dedupe_queue)
627 start_time = datetime.datetime.now()
628 Counters = cros_build_lib.Collection(
629 'Counters', upload_limit=upload_limit, uploaded_count=0, deduped_count=0)
630 counters = Counters()
# Helper: dedupe a batch of files, enqueue the still-missing ones for
# upload, and account for everything in |counters|.
632 def _Upload(queue, counters, files):
637 for item in SymbolDeduplicator(storage_query, files):
640 if counters.upload_limit == 0:
644 counters.uploaded_count += 1
645 if counters.upload_limit is not None:
646 counters.upload_limit -= 1
# Files the dedupe server already had count as deduped, not uploaded.
648 counters.deduped_count += (len(files) - missing_count)
651 storage_notify_proc.start()
653 with osutils.TempDir(prefix='upload_symbols.') as tempdir:
654 # For the first run, we collect the symbols that failed. If the
655 # overall failure rate was low, we'll retry them on the second run.
656 for retry in (retry, False):
657 # We need to limit ourselves to one upload at a time to avoid the server
658 # kicking in DoS protection. See these bugs for more details:
659 # http://crbug.com/209442
660 # http://crbug.com/212496
661 with parallel.BackgroundTaskRunner(uploader, processes=1) as queue:
# Batch discovered symbols up to |dedupe_limit| before querying the
# dedupe server, to amortize the round trip.
663 for sym_file in SymbolFinder(tempdir, sym_paths):
664 dedupe_list.append(sym_file)
665 dedupe_len = len(dedupe_list)
666 if dedupe_len < dedupe_limit:
667 if (counters.upload_limit is None or
668 dedupe_len < counters.upload_limit):
671 # We check the counter before _Upload so that we don't keep talking
672 # to the dedupe server. Otherwise, we end up sending one symbol at
673 # a time to it and that slows things down a lot.
674 if counters.upload_limit == 0:
677 _Upload(queue, counters, dedupe_list)
# Flush any final partial batch.
679 _Upload(queue, counters, dedupe_list)
681 # See if we need to retry, and if we haven't failed too many times yet.
682 if not retry or ErrorLimitHit(bg_errors, watermark_errors):
# Drain the failed queue (None-terminated) into the next pass's inputs.
686 failed_queue.put(None)
688 sym_path = failed_queue.get()
691 sym_paths.append(sym_path)
694 cros_build_lib.Warning('retrying %i symbols', len(sym_paths))
695 if counters.upload_limit is not None:
696 counters.upload_limit += len(sym_paths)
697 # Decrement the error count in case we recover in the second pass.
698 assert bg_errors.value >= len(sym_paths), \
699 'more failed files than errors?'
700 bg_errors.value -= len(sym_paths)
702 # No failed symbols, so just return now.
705 # If the user has requested it, save all the symbol files that we failed to
706 # upload to a listing file. This should help with recovery efforts later.
707 failed_queue.put(None)
708 WriteQueueToFile(failed_list, failed_queue, breakpad_dir)
711 cros_build_lib.Info('finished uploading; joining background process')
# Sentinel tells SymbolDeduplicatorNotify to finish and exit.
713 dedupe_queue.put(None)
715 # The notification might be slow going, so give it some time to finish.
716 # We have to poll here as the process monitor is watching for output and
717 # will kill us if we go silent for too long.
718 wait_minutes = DEDUPE_NOTIFY_TIMEOUT
719 while storage_notify_proc.is_alive() and wait_minutes > 0:
721 qsize = str(dedupe_queue.qsize())
724 cros_build_lib.Info('waiting up to %i minutes for ~%s notifications',
726 storage_notify_proc.join(60)
729 # The process is taking too long, so kill it and complain.
730 if storage_notify_proc.is_alive():
731 cros_build_lib.Warning('notification process took too long')
732 cros_build_lib.PrintBuildbotStepWarnings()
734 # Kill it gracefully first (traceback) before tacking it down harder.
735 pid = storage_notify_proc.pid
736 for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
737 cros_build_lib.Warning('sending %s to %i', signals.StrSignal(sig), pid)
738 # The process might have exited between the last check and the
739 # actual kill below, so ignore ESRCH errors.
743 if e.errno == errno.ESRCH:
748 if not storage_notify_proc.is_alive():
751 # Drain the queue so we don't hang when we finish.
753 cros_build_lib.Warning('draining the notify queue manually')
754 with timeout_util.Timeout(60):
756 while dedupe_queue.get_nowait():
760 except timeout_util.TimeoutError:
761 cros_build_lib.Warning('draining the notify queue failed; trashing it')
762 dedupe_queue.cancel_join_thread()
764 cros_build_lib.Info('uploaded %i symbols (%i were deduped) which took: %s',
765 counters.uploaded_count, counters.deduped_count,
766 datetime.datetime.now() - start_time)
768 return bg_errors.value
# NOTE(review): the def line of this function (original line 771, presumably
# "def main(argv):") is elided from this listing, along with several guard
# lines around the Die() calls and prompts below.
772 # TODO(build): Delete this assert.
773 assert isolateserver, 'Missing isolateserver import http://crbug.com/341152'
775 parser = commandline.ArgumentParser(description=__doc__)
777 parser.add_argument('sym_paths', type='path_or_uri', nargs='*', default=None,
778 help='symbol file or directory or URL or tarball')
779 parser.add_argument('--board', default=None,
780 help='board to build packages for')
781 parser.add_argument('--breakpad_root', type='path', default=None,
782 help='root directory for breakpad symbols')
783 parser.add_argument('--official_build', action='store_true', default=False,
784 help='point to official symbol server')
785 parser.add_argument('--server', type=str, default=None,
786 help='URI for custom symbol server')
787 parser.add_argument('--regenerate', action='store_true', default=False,
788 help='regenerate all symbols')
789 parser.add_argument('--upload-limit', type=int, default=None,
790 help='only upload # number of symbols')
791 parser.add_argument('--strip_cfi', type=int,
792 default=CRASH_SERVER_FILE_LIMIT - (10 * 1024 * 1024),
793 help='strip CFI data for files above this size')
794 parser.add_argument('--failed-list', type='path',
795 help='where to save a list of failed symbols')
796 parser.add_argument('--dedupe', action='store_true', default=False,
797 help='use the swarming service to avoid re-uploading')
798 parser.add_argument('--testing', action='store_true', default=False,
799 help='run in testing mode')
800 parser.add_argument('--yes', action='store_true', default=False,
801 help='answer yes to all prompts')
803 opts = parser.parse_args(argv)
# Option sanity checks; the conditions guarding the first Die() are elided.
808 cros_build_lib.Die('--regenerate may not be used with specific files')
810 if opts.board is None:
811 cros_build_lib.Die('--board is required')
813 if opts.breakpad_root and opts.regenerate:
814 cros_build_lib.Die('--regenerate may not be used with --breakpad_root')
817 # TODO(build): Kill off --testing mode once unittests are up-to-snuff.
818 cros_build_lib.Info('running in testing mode')
819 # pylint: disable=W0601,W0603
# Testing mode: no delays between attempts, and swap in the stub uploader.
820 global INITIAL_RETRY_DELAY, SymUpload, DEFAULT_SLEEP_DELAY
821 INITIAL_RETRY_DELAY = DEFAULT_SLEEP_DELAY = 0
822 SymUpload = TestingSymUpload
# Pick the dedupe namespace; testing runs never touch the official one.
824 dedupe_namespace = None
826 if opts.official_build and not opts.testing:
827 dedupe_namespace = OFFICIAL_DEDUPE_NAMESPACE
829 dedupe_namespace = STAGING_DEDUPE_NAMESPACE
# Confirm with the user before a full-build upload (skipped with --yes;
# that guard line is elided here).
832 prolog = '\n'.join(textwrap.wrap(textwrap.dedent("""
833 Uploading symbols for an entire Chromium OS build is really only
834 necessary for release builds and in a few cases for developers
835 to debug problems. It will take considerable time to run. For
836 developer debugging purposes, consider instead passing specific
839 if not cros_build_lib.BooleanPrompt(
840 prompt='Are you sure you want to upload all build symbols',
841 default=False, prolog=prolog):
842 cros_build_lib.Die('better safe than sorry')
# Optionally regenerate symbols first, then upload; both contribute to the
# accumulated error count |ret|.
846 ret += cros_generate_breakpad_symbols.GenerateBreakpadSymbols(
847 opts.board, breakpad_dir=opts.breakpad_root)
849 ret += UploadSymbols(opts.board, official=opts.official_build,
850 server=opts.server, breakpad_dir=opts.breakpad_root,
851 file_limit=opts.strip_cfi, sleep=DEFAULT_SLEEP_DELAY,
852 upload_limit=opts.upload_limit, sym_paths=opts.sym_paths,
853 failed_list=opts.failed_list,
854 dedupe_namespace=dedupe_namespace)
856 cros_build_lib.Error('encountered %i problem(s)', ret)
857 # Since exit(status) gets masked, clamp it to 1 so we don't inadvertently
858 # return 0 in case we are a multiple of the mask.
864 # We need this to run once per process. Do it at module import time as that
865 # will let us avoid doing it inline at function call time (see SymUpload) as
866 # that func might be called by the multiprocessing module which means we'll
867 # do the opener logic multiple times overall. Plus, if you're importing this
868 # module, it's a pretty good chance that you're going to need this.
# Installs poster's streaming-upload handlers into urllib2's opener chain so
# SymUpload's multipart POSTs can stream file contents.
869 poster.streaminghttp.register_openers()