Backed out changeset f82827622145 (bug 1304042) for making the build fail on existing failures a=backout
author: Wes Kocher <wkocher@mozilla.com>
Tue, 27 Sep 2016 12:49:18 -0700
changeset 358322 689f6fee184628d3dd968e3b6518cc811cfb3e43
parent 358321 fe4efa592a1cd8eba315a2c7c3d71bd34ea22376
child 358323 96fb89bb31d025197fd0d830403ab39233cb5f91
push id: 6795
push user: jlund@mozilla.com
push date: Mon, 23 Jan 2017 14:19:46 +0000
treeherder: mozilla-beta@76101b503191 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: backout
bugs: 1304042
milestone: 52.0a1
backs out: f82827622145bcd160f63ec48e9606498beefc39
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out changeset f82827622145 (bug 1304042) for making the build fail on existing failures a=backout
Makefile.in
toolkit/crashreporter/tools/symbolstore.py
--- a/Makefile.in
+++ b/Makefile.in
@@ -308,17 +308,17 @@ ifdef MOZ_CRASHREPORTER
 	$(RM) '$(DIST)/$(SYMBOL_FULL_ARCHIVE_BASENAME).zip'
 	$(NSINSTALL) -D $(DIST)/crashreporter-symbols
 	OBJCOPY='$(OBJCOPY)' \
 	$(PYTHON) $(topsrcdir)/toolkit/crashreporter/tools/symbolstore.py \
 	  $(MAKE_SYM_STORE_ARGS)                                          \
 	  $(foreach dir,$(SYM_STORE_SOURCE_DIRS),-s $(dir))               \
 	  $(DUMP_SYMS_BIN)                                                \
 	  $(DIST)/crashreporter-symbols                                   \
-	  $(MAKE_SYM_STORE_PATH) >                                        \
+	  $(MAKE_SYM_STORE_PATH) | grep -iv test >                        \
 	  $(DIST)/crashreporter-symbols/$(SYMBOL_INDEX_NAME)
 	echo packing symbols
 	$(NSINSTALL) -D $(DIST)/$(PKG_PATH)
 	cd $(DIST)/crashreporter-symbols && \
           zip -r5D '../$(PKG_PATH)$(SYMBOL_FULL_ARCHIVE_BASENAME).zip' . -x '*test*' -x '*Test*'
 	cd $(DIST)/crashreporter-symbols && \
 	grep 'sym' $(SYMBOL_INDEX_NAME) > $(SYMBOL_INDEX_NAME).tmp && \
 	  mv $(SYMBOL_INDEX_NAME).tmp $(SYMBOL_INDEX_NAME)
--- a/toolkit/crashreporter/tools/symbolstore.py
+++ b/toolkit/crashreporter/tools/symbolstore.py
@@ -544,28 +544,25 @@ class Dumper:
     # subclasses override this if they want to support this
     def CopyDebug(self, file, debug_file, guid, code_file, code_id):
         pass
 
     def Finish(self, stop_pool=True):
         '''Process all pending jobs and any jobs their callbacks submit.
         By default, will shutdown the executor, but for testcases that
         need multiple runs, pass stop_pool = False.'''
-        success = True
         for job, callback in JobPool.as_completed():
             try:
                 res = job.result()
             except Exception as e:
                 self.output(sys.stderr, 'Job raised exception: %s' % e)
-                success = False
                 continue
             callback(res)
         if stop_pool:
             JobPool.shutdown()
-        return 0 if success else 1
 
     def Process(self, *args):
         """Process files recursively in args."""
         # We collect all files to process first then sort by size to schedule
         # larger files first because larger files tend to take longer and we
         # don't like long pole stragglers.
         files = set()
         for arg in args:
@@ -593,20 +590,17 @@ class Dumper:
                 if self.ShouldSkipDir(d):
                     dirs.remove(d)
             for f in files:
                 fullpath = os.path.join(root, f)
                 if self.ShouldProcess(fullpath):
                     yield fullpath
 
     def SubmitJob(self, file_key, func_name, args, callback):
-        """Submits a job to run |func_name| with |args| to the pool of
-        workers, calling |callback| when the job completes. If the function
-        call raises, the call to `Dumper.Finish` will return failure.
-        """
+        """Submits a job to the pool of workers"""
         JobPool.submit((self, Dumper.lock, Dumper.srcdirRepoInfo, func_name, args), callback)
 
     def ProcessFilesFinished(self, res):
         """Callback from multiprocesing when ProcessFilesWork finishes;
         run the cleanup work, if any"""
         # only run the cleanup function once per tuple of files
         self.files_record[res['files']] += 1
         if self.files_record[res['files']] == len(self.archs):
@@ -972,17 +966,16 @@ class Dumper_Mac(Dumper):
         try:
             cmd = ([dsymutil] +
                    [a.replace('-a ', '--arch=') for a in self.archs if a] +
                    [file])
             self.output_pid(sys.stderr, ' '.join(cmd))
             subprocess.check_call(cmd, stdout=open(os.devnull, 'w'))
         except subprocess.CalledProcessError as e:
             self.output_pid(sys.stderr, 'Error running dsymutil: %s' % str(e))
-            raise Exception('Error running dsymutil')
 
         if not os.path.exists(dsymbundle):
             # dsymutil won't produce a .dSYM for files without symbols
             self.output_pid(sys.stderr, "No symbols found in file: %s" % (file,))
             result['status'] = False
             result['files'] = (file, )
             return result
 
@@ -1045,41 +1038,41 @@ to canonical locations in the source rep
 """)
     (options, args) = parser.parse_args()
 
     #check to see if the pdbstr.exe exists
     if options.srcsrv:
         pdbstr = os.environ.get("PDBSTR_PATH")
         if not os.path.exists(pdbstr):
             print >> sys.stderr, "Invalid path to pdbstr.exe - please set/check PDBSTR_PATH.\n"
-            return 1
+            sys.exit(1)
 
     if len(args) < 3:
         parser.error("not enough arguments")
-        return 1
+        exit(1)
 
     try:
         manifests = validate_install_manifests(options.install_manifests)
     except (IOError, ValueError) as e:
         parser.error(str(e))
-        return 1
+        exit(1)
     file_mapping = make_file_mapping(manifests)
     dumper = GetPlatformSpecificDumper(dump_syms=args[0],
                                        symbol_path=args[1],
                                        copy_debug=options.copy_debug,
                                        archs=options.archs,
                                        srcdirs=options.srcdir,
                                        vcsinfo=options.vcsinfo,
                                        srcsrv=options.srcsrv,
                                        exclude=options.exclude,
                                        repo_manifest=options.repo_manifest,
                                        file_mapping=file_mapping)
 
     dumper.Process(*args[2:])
-    return dumper.Finish()
+    dumper.Finish()
 
 # run main if run directly
 if __name__ == "__main__":
     # set up the multiprocessing infrastructure before we start;
     # note that this needs to be in the __main__ guard, or else Windows will choke
     Dumper.GlobalInit()
 
-    sys.exit(main())
+    main()