Bug 1041944 - Printing mochitest failures at the end of the execution (with mach). r=ahal
author Ahmed Kachkach <ahmed.kachkach@gmail.com>
Tue, 22 Jul 2014 11:56:00 +0200
changeset 210042 ef40e43550a2969cb1c602ff4baece8afd741054
parent 210041 305e0ca907a876162ecbe9b5cb403aba5da62aca
child 210043 b2a0854d295ec6141a75aeaa28e12baf15e673de
push id 6741
push user raliiev@mozilla.com
push date Tue, 02 Sep 2014 16:57:58 +0000
treeherder mozilla-aurora@aed50d3edf33 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
Bug 1041944 - Printing mochitest failures at the end of the execution (with mach). r=ahal
--- a/testing/mochitest/mach_commands.py
+++ b/testing/mochitest/mach_commands.py
@@ -19,17 +19,16 @@ from mozbuild.base import (
 from mach.decorators import (
-from mach.logging import StructuredHumanFormatter
 The %s command requires the adb binary to be on your path.
 If you have a B2G build, this can be found in
@@ -71,21 +70,16 @@ There should be an app called 'test-cont
     'mochitest': 'plain',
     'chrome': 'chrome',
     'browser-chrome': 'browser',
     'a11y': 'a11y',
     'webapprt-chrome': 'webapprt-chrome',
-class UnexpectedFilter(logging.Filter):
-    def filter(self, record):
-        msg = getattr(record, 'params', {}).get('msg', '')
-        return 'TEST-UNEXPECTED-' in msg
 class MochitestRunner(MozbuildObject):
     """Easily run mochitests.
     This currently contains just the basics for running mochitests. We may want
     to hook up result parsing, etc.
@@ -229,27 +223,24 @@ class MochitestRunner(MozbuildObject):
             test_paths = [self._wrap_path_argument(p).relpath() for p in test_paths]
         failure_file_path = os.path.join(self.statedir, 'mochitest_failures.json')
         if rerun_failures and not os.path.exists(failure_file_path):
             print('No failure file present. Did you run mochitests before?')
             return 1
-        from StringIO import StringIO
         # runtests.py is ambiguous, so we load the file/module manually.
         if 'mochitest' not in sys.modules:
             import imp
             path = os.path.join(self.mochitest_dir, 'runtests.py')
             with open(path, 'r') as fh:
                 imp.load_module('mochitest', fh, path,
                     ('.py', 'r', imp.PY_SOURCE))
-        import mozinfo
         import mochitest
         from manifestparser import TestManifest
         from mozbuild.testing import TestResolver
         # This is required to make other components happy. Sad, isn't it?
         # Automation installs its own stream handler to stdout. Since we want
@@ -372,40 +363,24 @@ class MochitestRunner(MozbuildObject):
         options = opts.verifyOptions(options, runner)
         if options is None:
             raise Exception('mochitest option validator failed.')
         # We need this to enable colorization of output.
-        # Output processing is a little funky here. The old make targets
-        # grepped the log output from TEST-UNEXPECTED-* and printed these lines
-        # after test execution. Ideally the test runner would expose a Python
-        # API for obtaining test results and we could just format failures
-        # appropriately. Unfortunately, it doesn't yet do that. So, we capture
-        # all output to a buffer then "grep" the buffer after test execution.
-        # Bug 858197 tracks a Python API that would facilitate this.
-        test_output = StringIO()
-        handler = logging.StreamHandler(test_output)
-        handler.addFilter(UnexpectedFilter())
-        handler.setFormatter(StructuredHumanFormatter(0, write_times=False))
-        logging.getLogger().addHandler(handler)
         result = runner.runTests(options)
-        # Need to remove our buffering handler before we echo failures or else
-        # it will catch them again!
-        logging.getLogger().removeHandler(handler)
-        if test_output.getvalue():
+        if runner.message_logger.errors:
             result = 1
-            for line in test_output.getvalue().splitlines():
-                self.log(logging.INFO, 'unexpected', {'msg': line}, '{msg}')
+            runner.message_logger.logger.warning("The following tests failed:")
+            for error in runner.message_logger.errors:
+                runner.message_logger.logger.log_raw(error)
         return result
 def MochitestCommand(func):
     """Decorator that adds shared command arguments to mochitest commands."""
     # This employs light Python magic. Keep in mind a decorator is just a
--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -88,16 +88,19 @@ class MessageLogger(object):
     def __init__(self, logger, buffering=True, name='mochitest'):
         self.logger = logger
         self.buffering = buffering
         self.tests_started = False
         # Message buffering
         self.buffered_messages = []
+        # Failures reporting, after the end of the tests execution
+        self.errors = []
     def valid_message(self, obj):
         """True if the given object is a valid structured message (only does a superficial validation)"""
         return isinstance(obj, dict) and 'action' in obj and obj['action'] in MessageLogger.VALID_ACTIONS
     def parse_line(self, line):
         """Takes a given line of input (structured or not) and returns a list of structured messages"""
         line = line.rstrip().decode("UTF-8", "replace")
@@ -128,27 +131,31 @@ class MessageLogger(object):
             self.buffering = False
         unstructured = False
         if 'unstructured' in message:
             unstructured = True
+        # Saving errors/failures to be shown at the end of the test run
+        is_error = 'expected' in message or (message['action'] == 'log' and message['message'].startswith('TEST-UNEXPECTED'))
+        if is_error:
+            self.errors.append(message)
         # If we don't do any buffering, or the tests haven't started, or the message was unstructured, it is directly logged
         if not self.buffering or unstructured or not self.tests_started:
         # If a test ended, we clean the buffer
         if message['action'] == 'test_end':
             self.buffered_messages = []
         # Buffering logic; Also supports "raw" errors (in log messages) because some tests manually dump 'TEST-UNEXPECTED-FAIL'
-        is_error = 'expected' in message or (message['action'] == 'log' and message['message'].startswith('TEST-UNEXPECTED'))
         if not is_error and message['action'] not in self.BUFFERED_ACTIONS:
         # test_status messages buffering
         if is_error:
             if self.buffered_messages:
                 number_messages = min(self.BUFFERING_THRESHOLD, len(self.buffered_messages))