Bug 1041944 - Printing mochitest failures at the end of the execution (with mach). r=ahal, a=test-only
author Ahmed Kachkach <ahmed.kachkach@gmail.com>
Tue, 22 Jul 2014 11:56:00 +0200
changeset 217481 b0d492f89974872417ab2e419a5d1565ea450048
parent 217480 251037f4b6c9bd5897e3b535b1ebd2cb614ce1eb
child 217482 dcec230e1104e21494b2365eef9dbba511d8a76e
push id 515
push user raliiev@mozilla.com
push date Mon, 06 Oct 2014 12:51:51 +0000
treeherder mozilla-release@267c7a481bef
reviewers ahal, test-only
bugs 1041944
milestone 33.0a2
testing/mochitest/mach_commands.py
testing/mochitest/runtests.py
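
The patch replaces the old capture-and-grep approach in mach_commands.py with a collect-then-replay pattern: MessageLogger keeps every structured failure message in an errors list while the tests run, and the mach command replays that list (and forces a failing exit code) once runner.runTests() returns. A minimal, self-contained sketch of that pattern follows; the MiniLogger class and the sample messages are illustrative stand-ins, not code from the patch, which goes through mozlog's structured logger via runner.message_logger.

# Minimal sketch of the collect-then-replay pattern used by this patch.
class MiniLogger(object):
    """Collects failure messages during the run and replays them afterwards."""

    def __init__(self):
        self.errors = []

    def process_message(self, message):
        # Mirror the patch's error test: an unexpected test result carries an
        # 'expected' key, and some tests manually dump raw 'TEST-UNEXPECTED-*'
        # lines that arrive as 'log' messages.
        is_error = ('expected' in message or
                    (message.get('action') == 'log' and
                     message.get('message', '').startswith('TEST-UNEXPECTED')))
        if is_error:
            self.errors.append(message)

    def report_failures(self):
        # Called after the test run; returns 1 if anything failed.
        if not self.errors:
            return 0
        print("The following tests failed:")
        for error in self.errors:
            print(error)
        return 1


if __name__ == '__main__':
    logger = MiniLogger()
    logger.process_message({'action': 'test_status', 'test': 'test_foo.html',
                            'subtest': 'a == b', 'status': 'FAIL',
                            'expected': 'PASS'})
    logger.process_message({'action': 'log', 'level': 'info',
                            'message': 'TEST-UNEXPECTED-FAIL | test_bar.html | crash'})
    exit_code = logger.report_failures()

Recording each failure at the moment its message is processed means nothing has to be re-parsed from captured output after the run, which is exactly the StringIO/UnexpectedFilter machinery the diff below removes.
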
--- a/testing/mochitest/mach_commands.py
+++ b/testing/mochitest/mach_commands.py
@@ -19,17 +19,16 @@ from mozbuild.base import (
 )
 
 from mach.decorators import (
     CommandArgument,
     CommandProvider,
     Command,
 )
 
-from mach.logging import StructuredHumanFormatter
 
 ADB_NOT_FOUND = '''
 The %s command requires the adb binary to be on your path.
 
 If you have a B2G build, this can be found in
 '%s/out/host/<platform>/bin'.
 '''.lstrip()
 
@@ -71,21 +70,16 @@ There should be an app called 'test-cont
 FLAVORS = {
     'mochitest': 'plain',
     'chrome': 'chrome',
     'browser-chrome': 'browser',
     'a11y': 'a11y',
     'webapprt-chrome': 'webapprt-chrome',
 }
 
-class UnexpectedFilter(logging.Filter):
-    def filter(self, record):
-        msg = getattr(record, 'params', {}).get('msg', '')
-        return 'TEST-UNEXPECTED-' in msg
-
 
 class MochitestRunner(MozbuildObject):
     """Easily run mochitests.
 
     This currently contains just the basics for running mochitests. We may want
     to hook up result parsing, etc.
     """
 
@@ -229,27 +223,24 @@ class MochitestRunner(MozbuildObject):
             test_paths = [self._wrap_path_argument(p).relpath() for p in test_paths]
 
         failure_file_path = os.path.join(self.statedir, 'mochitest_failures.json')
 
         if rerun_failures and not os.path.exists(failure_file_path):
             print('No failure file present. Did you run mochitests before?')
             return 1
 
-        from StringIO import StringIO
-
         # runtests.py is ambiguous, so we load the file/module manually.
         if 'mochitest' not in sys.modules:
             import imp
             path = os.path.join(self.mochitest_dir, 'runtests.py')
             with open(path, 'r') as fh:
                 imp.load_module('mochitest', fh, path,
                     ('.py', 'r', imp.PY_SOURCE))
 
-        import mozinfo
         import mochitest
         from manifestparser import TestManifest
         from mozbuild.testing import TestResolver
 
         # This is required to make other components happy. Sad, isn't it?
         os.chdir(self.topobjdir)
 
         # Automation installs its own stream handler to stdout. Since we want
@@ -373,40 +364,24 @@ class MochitestRunner(MozbuildObject):
         options = opts.verifyOptions(options, runner)
 
         if options is None:
             raise Exception('mochitest option validator failed.')
 
         # We need this to enable colorization of output.
         self.log_manager.enable_unstructured()
 
-        # Output processing is a little funky here. The old make targets
-        # grepped the log output from TEST-UNEXPECTED-* and printed these lines
-        # after test execution. Ideally the test runner would expose a Python
-        # API for obtaining test results and we could just format failures
-        # appropriately. Unfortunately, it doesn't yet do that. So, we capture
-        # all output to a buffer then "grep" the buffer after test execution.
-        # Bug 858197 tracks a Python API that would facilitate this.
-        test_output = StringIO()
-        handler = logging.StreamHandler(test_output)
-        handler.addFilter(UnexpectedFilter())
-        handler.setFormatter(StructuredHumanFormatter(0, write_times=False))
-        logging.getLogger().addHandler(handler)
-
         result = runner.runTests(options)
 
-        # Need to remove our buffering handler before we echo failures or else
-        # it will catch them again!
-        logging.getLogger().removeHandler(handler)
         self.log_manager.disable_unstructured()
-
-        if test_output.getvalue():
+        if runner.message_logger.errors:
             result = 1
-            for line in test_output.getvalue().splitlines():
-                self.log(logging.INFO, 'unexpected', {'msg': line}, '{msg}')
+            runner.message_logger.logger.warning("The following tests failed:")
+            for error in runner.message_logger.errors:
+                runner.message_logger.logger.log_raw(error)
 
         return result
 
 
 def MochitestCommand(func):
     """Decorator that adds shared command arguments to mochitest commands."""
 
     # This employs light Python magic. Keep in mind a decorator is just a
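
Before the error check in the runtests.py hunks below can run, every line of runner output has to be turned into a structured message; raw, unstructured lines are wrapped so that manually dumped 'TEST-UNEXPECTED-FAIL' output is still caught. A minimal sketch of that classification step, assuming mozlog-style JSON lines; the VALID_ACTIONS set and the to_message helper are illustrative assumptions, not code taken from runtests.py.

import json

# Assumed set of structured actions for this sketch only.
VALID_ACTIONS = set(['suite_start', 'suite_end', 'test_start', 'test_end',
                     'test_status', 'log', 'process_output'])

def to_message(line):
    """Return a structured message dict for one raw line of runner output."""
    line = line.rstrip()
    try:
        message = json.loads(line)
    except ValueError:
        message = None
    if isinstance(message, dict) and message.get('action') in VALID_ACTIONS:
        return message
    # Unstructured output is wrapped in a 'log' message, so the same
    # TEST-UNEXPECTED check used by the patch also applies to it.
    return {'action': 'log', 'level': 'info', 'message': line,
            'unstructured': True}

# Example: a raw failure line still ends up flagged as an error.
msg = to_message("TEST-UNEXPECTED-FAIL | test_baz.html | timed out")
assert msg['message'].startswith('TEST-UNEXPECTED')

Because the patch moves the is_error check ahead of the early-return paths in MessageLogger.process_message, such wrapped lines are recorded in self.errors even though they are logged directly rather than buffered.
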
--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -100,16 +100,19 @@ class MessageLogger(object):
     def __init__(self, logger, buffering=True, name='mochitest'):
         self.logger = logger
         self.buffering = buffering
         self.tests_started = False
 
         # Message buffering
         self.buffered_messages = []
 
+        # Failure reporting after the end of the test run
+        self.errors = []
+
     def valid_message(self, obj):
         """True if the given object is a valid structured message (only does a superficial validation)"""
         return isinstance(obj, dict) and 'action' in obj and obj['action'] in MessageLogger.VALID_ACTIONS
 
     def parse_line(self, line):
         """Takes a given line of input (structured or not) and returns a list of structured messages"""
         line = line.rstrip().decode("UTF-8", "replace")
 
@@ -140,27 +143,31 @@ class MessageLogger(object):
             self.buffering = False
             return
 
         unstructured = False
         if 'unstructured' in message:
             unstructured = True
             message.pop('unstructured')
 
+        # Saving errors/failures to be shown at the end of the test run
+        is_error = 'expected' in message or (message['action'] == 'log' and message['message'].startswith('TEST-UNEXPECTED'))
+        if is_error:
+            self.errors.append(message)
+
         # If we don't do any buffering, or the tests haven't started, or the message was unstructured, it is directly logged
         if not self.buffering or unstructured or not self.tests_started:
             self.logger.log_raw(message)
             return
 
         # If a test ended, we clean the buffer
         if message['action'] == 'test_end':
             self.buffered_messages = []
 
         # Buffering logic; Also supports "raw" errors (in log messages) because some tests manually dump 'TEST-UNEXPECTED-FAIL'
-        is_error = 'expected' in message or (message['action'] == 'log' and message['message'].startswith('TEST-UNEXPECTED'))
         if not is_error and message['action'] not in self.BUFFERED_ACTIONS:
             self.logger.log_raw(message)
             return
 
         # test_status messages buffering
         if is_error:
             if self.buffered_messages:
                 number_messages = min(self.BUFFERING_THRESHOLD, len(self.buffered_messages))