bug 1304042 - buildsymbols should fail if running dsymutil fails. r=gps
☠☠ backed out by 689f6fee1846 ☠☠
author: Ted Mielczarek <ted@mielczarek.org>
Tue, 20 Sep 2016 10:35:16 -0400
changeset 345513 f82827622145bcd160f63ec48e9606498beefc39
parent 345512 441dc90a1f8fc14d3fb0a494c7e4ab1f2bb08dad
child 345514 9dfac0d146bc6941e14dcf797e721c5de1e88d0c
push id: 6795
push user: jlund@mozilla.com
push date: Mon, 23 Jan 2017 14:19:46 +0000
treeherder: mozilla-esr52@76101b503191 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: gps
bugs: 1304042
milestone: 52.0a1
bug 1304042 - buildsymbols should fail if running dsymutil fails. r=gps MozReview-Commit-ID: LteC4jGF3FD
Makefile.in
toolkit/crashreporter/tools/symbolstore.py
--- a/Makefile.in
+++ b/Makefile.in
@@ -308,17 +308,17 @@ ifdef MOZ_CRASHREPORTER
 	$(RM) '$(DIST)/$(SYMBOL_FULL_ARCHIVE_BASENAME).zip'
 	$(NSINSTALL) -D $(DIST)/crashreporter-symbols
 	OBJCOPY='$(OBJCOPY)' \
 	$(PYTHON) $(topsrcdir)/toolkit/crashreporter/tools/symbolstore.py \
 	  $(MAKE_SYM_STORE_ARGS)                                          \
 	  $(foreach dir,$(SYM_STORE_SOURCE_DIRS),-s $(dir))               \
 	  $(DUMP_SYMS_BIN)                                                \
 	  $(DIST)/crashreporter-symbols                                   \
-	  $(MAKE_SYM_STORE_PATH) | grep -iv test >                        \
+	  $(MAKE_SYM_STORE_PATH) >                                        \
 	  $(DIST)/crashreporter-symbols/$(SYMBOL_INDEX_NAME)
 	echo packing symbols
 	$(NSINSTALL) -D $(DIST)/$(PKG_PATH)
 	cd $(DIST)/crashreporter-symbols && \
           zip -r5D '../$(PKG_PATH)$(SYMBOL_FULL_ARCHIVE_BASENAME).zip' . -x '*test*' -x '*Test*'
 	cd $(DIST)/crashreporter-symbols && \
 	grep 'sym' $(SYMBOL_INDEX_NAME) > $(SYMBOL_INDEX_NAME).tmp && \
 	  mv $(SYMBOL_INDEX_NAME).tmp $(SYMBOL_INDEX_NAME)
--- a/toolkit/crashreporter/tools/symbolstore.py
+++ b/toolkit/crashreporter/tools/symbolstore.py
@@ -544,25 +544,28 @@ class Dumper:
     # subclasses override this if they want to support this
     def CopyDebug(self, file, debug_file, guid, code_file, code_id):
         pass
 
     def Finish(self, stop_pool=True):
         '''Process all pending jobs and any jobs their callbacks submit.
         By default, will shutdown the executor, but for testcases that
         need multiple runs, pass stop_pool = False.'''
+        success = True
         for job, callback in JobPool.as_completed():
             try:
                 res = job.result()
             except Exception as e:
                 self.output(sys.stderr, 'Job raised exception: %s' % e)
+                success = False
                 continue
             callback(res)
         if stop_pool:
             JobPool.shutdown()
+        return 0 if success else 1
 
     def Process(self, *args):
         """Process files recursively in args."""
         # We collect all files to process first then sort by size to schedule
         # larger files first because larger files tend to take longer and we
         # don't like long pole stragglers.
         files = set()
         for arg in args:
@@ -590,17 +593,20 @@ class Dumper:
                 if self.ShouldSkipDir(d):
                     dirs.remove(d)
             for f in files:
                 fullpath = os.path.join(root, f)
                 if self.ShouldProcess(fullpath):
                     yield fullpath
 
     def SubmitJob(self, file_key, func_name, args, callback):
-        """Submits a job to the pool of workers"""
+        """Submits a job to run |func_name| with |args| to the pool of
+        workers, calling |callback| when the job completes. If the function
+        call raises, the call to `Dumper.Finish` will return failure.
+        """
         JobPool.submit((self, Dumper.lock, Dumper.srcdirRepoInfo, func_name, args), callback)
 
     def ProcessFilesFinished(self, res):
         """Callback from multiprocesing when ProcessFilesWork finishes;
         run the cleanup work, if any"""
         # only run the cleanup function once per tuple of files
         self.files_record[res['files']] += 1
         if self.files_record[res['files']] == len(self.archs):
@@ -966,16 +972,17 @@ class Dumper_Mac(Dumper):
         try:
             cmd = ([dsymutil] +
                    [a.replace('-a ', '--arch=') for a in self.archs if a] +
                    [file])
             self.output_pid(sys.stderr, ' '.join(cmd))
             subprocess.check_call(cmd, stdout=open(os.devnull, 'w'))
         except subprocess.CalledProcessError as e:
             self.output_pid(sys.stderr, 'Error running dsymutil: %s' % str(e))
+            raise Exception('Error running dsymutil')
 
         if not os.path.exists(dsymbundle):
             # dsymutil won't produce a .dSYM for files without symbols
             self.output_pid(sys.stderr, "No symbols found in file: %s" % (file,))
             result['status'] = False
             result['files'] = (file, )
             return result
 
@@ -1038,41 +1045,41 @@ to canonical locations in the source rep
 """)
     (options, args) = parser.parse_args()
 
     #check to see if the pdbstr.exe exists
     if options.srcsrv:
         pdbstr = os.environ.get("PDBSTR_PATH")
         if not os.path.exists(pdbstr):
             print >> sys.stderr, "Invalid path to pdbstr.exe - please set/check PDBSTR_PATH.\n"
-            sys.exit(1)
+            return 1
 
     if len(args) < 3:
         parser.error("not enough arguments")
-        exit(1)
+        return 1
 
     try:
         manifests = validate_install_manifests(options.install_manifests)
     except (IOError, ValueError) as e:
         parser.error(str(e))
-        exit(1)
+        return 1
     file_mapping = make_file_mapping(manifests)
     dumper = GetPlatformSpecificDumper(dump_syms=args[0],
                                        symbol_path=args[1],
                                        copy_debug=options.copy_debug,
                                        archs=options.archs,
                                        srcdirs=options.srcdir,
                                        vcsinfo=options.vcsinfo,
                                        srcsrv=options.srcsrv,
                                        exclude=options.exclude,
                                        repo_manifest=options.repo_manifest,
                                        file_mapping=file_mapping)
 
     dumper.Process(*args[2:])
-    dumper.Finish()
+    return dumper.Finish()
 
 # run main if run directly
 if __name__ == "__main__":
     # set up the multiprocessing infrastructure before we start;
     # note that this needs to be in the __main__ guard, or else Windows will choke
     Dumper.GlobalInit()
 
-    main()
+    sys.exit(main())