Bug 1282522 - Ensure stderr is printed in case of a shutdown crash in a passing xpcshell test. r=gps, a=test-only
author Chris Manchester <cmanchester@mozilla.com>
Mon, 12 Sep 2016 23:02:44 -0700
changeset 355709 061867df5aabf908aa3d444d2b9a7527fe4d9195
parent 355708 13ed0212fab765bad762a64b412a5ee4ccbf1f2a
child 355710 b57b64179109580af8cad33124c49ddc81ab4c5f
push id 6570
push user raliiev@mozilla.com
push date Mon, 14 Nov 2016 12:26:13 +0000
treeherder mozilla-beta@f455459b2ae5
reviewers gps, test-only
bugs 1282522
milestone 51.0a2
Bug 1282522 - Ensure stderr is printed in case of a shutdown crash in a passing xpcshell test. r=gps, a=test-only

There were two assumptions preventing this output from being logged, both related to the case where a test passes and xpcshell returns 0. The first was that we would not find crash dumps in this case, and would therefore not need to log the full output of the test process (when xpcshell returned non-zero or a test failed, we would log this output before checking for crashes). The second was that if a test was eligible to retry, we would not need to store its output at all, because that output would only relate to a failure we would consider non-fatal.

The first assumption does not hold because it is possible to fatally assert at shutdown in tests that spawn child processes without causing a test failure or a non-zero exit code. The second assumption followed from the first, and is violated whenever the first is, because in that case we consider a found crash fatal even when the test was eligible to retry.

This patch reverses these assumptions and logs the full output of a test that passes but produces crash dumps. It is not clear that the existing code ever intended a crash to be considered fatal when a test was eligible to retry, but changing that criterion now would reduce our effective test coverage by ignoring crashes that are currently considered fatal, so after this patch we continue to treat this scenario as fatal. If it is determined that these crashes are caused by the tests running in parallel with other tests, or that they are not relevant for some other reason, the affected tests should be run sequentially, or this criterion should be changed.

MozReview-Commit-ID: 2PaFSGx2MVR
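For readers skimming the diff below, the following is a minimal sketch of the buffer-and-flush behaviour the patch moves to, assuming a stripped-down harness class; HarnessStub, finish_test, and found_crash_dumps are illustrative names for this sketch, not the real runxpcshelltests.py API.

import logging

class HarnessStub(object):
    """Illustrative stand-in for XPCShellTestThread's output handling."""

    def __init__(self, log, verbose=False, retry=False):
        self.log = log
        self.verbose = verbose
        self.retry = retry
        self.output_lines = []  # buffered test output, even for retry-eligible tests
        self.failCount = 0

    def report_message(self, message):
        # Log immediately when verbose; otherwise buffer unconditionally.
        # Before this patch, retry-eligible tests skipped the buffer, so their
        # output was lost when a shutdown crash later proved fatal.
        if self.verbose:
            self.log.info(message)
        else:
            self.output_lines.append(message)

    def log_full_output(self):
        # Flush and clear the buffer so repeated calls cannot duplicate output.
        if not self.output_lines:
            return
        self.log.info(">>>>>>>")
        for line in self.output_lines:
            self.log.info(line)
        self.log.info("<<<<<<<")
        self.output_lines = []

    def finish_test(self, found_crash_dumps):
        # Crash dumps are fatal for a non-retry run even if the test passed,
        # so dump the buffered output here; the normal failure path never ran.
        if found_crash_dumps:
            if self.retry:
                return
            self.log_full_output()
            self.failCount = 1

# Example: a passing test that leaves a crash dump still gets its output logged.
logging.basicConfig(level=logging.INFO)
harness = HarnessStub(logging.getLogger("xpcshell-sketch"))
harness.report_message("TEST-PASS | test_foo.js | run_test")
harness.finish_test(found_crash_dumps=True)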
testing/xpcshell/runxpcshelltests.py
--- a/testing/xpcshell/runxpcshelltests.py
+++ b/testing/xpcshell/runxpcshelltests.py
@@ -281,21 +281,20 @@ class XPCShellTestThread(Thread):
         We can sometimes get here before the process has terminated, which would
         cause removeDir() to fail - so check for the process and kill it if needed.
         """
         if proc and self.poll(proc) is None:
             self.kill(proc)
             message = "%s | Process still running after test!" % self.test_object['id']
             if self.retry:
                 self.log.info(message)
-                self.log_full_output(self.output_lines)
                 return
 
             self.log.error(message)
-            self.log_full_output(self.output_lines)
+            self.log_full_output()
             self.failCount = 1
 
     def testTimeout(self, proc):
         if self.test_object['expected'] == 'pass':
             expected = 'PASS'
         else:
             expected = 'FAIL'
 
@@ -303,17 +302,17 @@ class XPCShellTestThread(Thread):
             self.log.test_end(self.test_object['id'], 'TIMEOUT',
                               expected='TIMEOUT',
                               message="Test timed out")
         else:
             self.failCount = 1
             self.log.test_end(self.test_object['id'], 'TIMEOUT',
                               expected=expected,
                               message="Test timed out")
-            self.log_full_output(self.output_lines)
+            self.log_full_output()
 
         self.done = True
         self.timedout = True
         self.killTimeout(proc)
         self.log.info("xpcshell return code: %s" % self.getReturnCode(proc))
         self.postCheck(proc)
         self.clean_temp_dirs(self.test_object['path'])
 
@@ -524,33 +523,32 @@ class XPCShellTestThread(Thread):
             if 'message' in line:
                 line['message'] = self.fix_text_output(line['message'])
             if 'xpcshell_process' in line:
                 line['thread'] =  ' '.join([current_thread().name, line['xpcshell_process']])
             else:
                 line['thread'] = current_thread().name
             self.log.log_raw(line)
 
-    def log_full_output(self, output):
-        """Log output any buffered output from the test process"""
-        if not output:
+    def log_full_output(self):
+        """Logs any buffered output from the test process, and clears the buffer."""
+        if not self.output_lines:
             return
         self.log.info(">>>>>>>")
-        for line in output:
+        for line in self.output_lines:
             self.log_line(line)
         self.log.info("<<<<<<<")
+        self.output_lines = []
 
     def report_message(self, message):
         """Stores or logs a json log message in mozlog format."""
         if self.verbose:
             self.log_line(message)
         else:
-            # Tests eligible to retry will never dump their buffered output.
-            if not self.retry:
-                self.output_lines.append(message)
+            self.output_lines.append(message)
 
     def process_line(self, line_string):
         """ Parses a single line of output, determining its significance and
         reporting a message.
         """
         if not line_string.strip():
             return
 
@@ -741,49 +739,52 @@ class XPCShellTestThread(Thread):
             if status != expected:
                 if self.retry:
                     self.log.test_end(name, status, expected=status,
                                       message="Test failed or timed out, will retry")
                     self.clean_temp_dirs(path)
                     return
 
                 self.log.test_end(name, status, expected=expected, message=message)
-                self.log_full_output(self.output_lines)
+                self.log_full_output()
 
                 self.failCount += 1
 
                 if self.failureManifest:
                     with open(self.failureManifest, 'a') as f:
                         f.write('[%s]\n' % self.test_object['path'])
                         for k, v in self.test_object.items():
                             f.write('%s = %s\n' % (k, v))
 
             else:
                 # If TSan reports a race, dump the output, else we can't
                 # diagnose what the problem was.  See comments above about
                 # the significance of TSAN_EXIT_CODE_WITH_RACES.
                 if self.usingTSan and return_code == TSAN_EXIT_CODE_WITH_RACES:
-                    self.log_full_output(self.output_lines)
+                    self.log_full_output()
 
                 self.log.test_end(name, status, expected=expected, message=message)
                 if self.verbose:
-                    self.log_full_output(self.output_lines)
+                    self.log_full_output()
 
                 self.retry = False
 
                 if expect_pass:
                     self.passCount = 1
                 else:
                     self.todoCount = 1
 
             if self.checkForCrashes(self.tempDir, self.symbolsPath, test_name=name):
                 if self.retry:
                     self.clean_temp_dirs(path)
                     return
 
+                # If we assert during shutdown there's a chance the test has passed
+                # but we haven't logged full output, so do so here.
+                self.log_full_output()
                 self.failCount = 1
 
             if self.logfiles and process_output:
                 self.createLogFile(name, process_output)
 
         finally:
             self.postCheck(proc)
             self.clean_temp_dirs(path)