Bug 1552914 - [mozlog] Update formatters to reflect new known_intermittent field. r=jgraham
authorNikki S <nikkisharpley@gmail.com>
Fri, 24 May 2019 13:30:13 +0000
changeset 475373 ecf05645d8eb8d436dc908edd19403b790282084
parent 475372 49278023aa7d33d3ead6ce778dffe69d6e25f04a
child 475374 cbb2af10089cfcdd849726f14b84ca5754f9b9fb
push id36060
push usercbrindusan@mozilla.com
push dateFri, 24 May 2019 21:47:21 +0000
treeherdermozilla-central@ac95bdf3c0b3 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjgraham
bugs1552914
milestone69.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1552914 - [mozlog] Update formatters to reflect new known_intermittent field. r=jgraham The in-tree log formatters have been updated to reflect the new `known_intermittent` field, ensuring that a status matching a `known_intermittent` status is not logged as an unexpected failure. A message is printed when there is a test status that matches this. A test for known intermittents has been added to the test_formatters, following the same testing style for pass or fail. Differential Revision: https://phabricator.services.mozilla.com/D32174
testing/mozbase/mozlog/mozlog/formatters/machformatter.py
testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
testing/mozbase/mozlog/mozlog/handlers/summaryhandler.py
testing/mozbase/mozlog/mozlog/structuredlog.py
testing/mozbase/mozlog/tests/test_formatters.py
testing/mozbase/mozlog/tests/test_structured.py
--- a/testing/mozbase/mozlog/mozlog/formatters/machformatter.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/machformatter.py
@@ -14,16 +14,17 @@ from ..handlers import SummaryHandler
 import six
 from functools import reduce
 
 color_dict = {
     'log_test_status_fail': 'red',
     'log_process_output': 'blue',
     'log_test_status_pass': 'green',
     'log_test_status_unexpected_fail': 'red',
+    'log_test_status_known_intermittent': 'orange',
     'time': 'cyan',
     'action': 'yellow',
     'pid': 'cyan',
     'heading': 'bold_yellow',
     'sub_heading': 'yellow',
     'error': 'red',
     'warning': 'yellow',
     'bold': 'bold',
@@ -131,83 +132,112 @@ class MachFormatter(base.BaseFormatter):
         rv = [action]
         if not self.summary_on_shutdown:
             rv.append(
                 self._format_suite_summary(
                     self.summary.current_suite,
                     self.summary.current))
         return "\n".join(rv)
 
-    def _format_expected(self, status, expected):
+    def _format_expected(self, status, expected, known_intermittent=[]):
         if status == expected:
             color = self.color_formatter.log_test_status_pass
             if expected not in ("PASS", "OK"):
                 color = self.color_formatter.log_test_status_fail
                 status = "EXPECTED-%s" % status
         else:
-            color = self.color_formatter.log_test_status_fail
-            if status in ("PASS", "OK"):
-                status = "UNEXPECTED-%s" % status
+            if status in known_intermittent:
+                color = self.color_formatter.log_test_status_known_intermittent
+                status = "KNOWN-INTERMITTENT-%s" % status
+            else:
+                color = self.color_formatter.log_test_status_fail
+                if status in ("PASS", "OK"):
+                    status = "UNEXPECTED-%s" % status
         return color(status)
 
     def _format_status(self, test, data):
         name = data.get("subtest", test)
-        rv = "%s %s" % (self._format_expected(
-            data["status"], data.get("expected", data["status"])), name)
+        rv = "%s %s" % (self._format_expected(data["status"],
+                        data.get("expected", data["status"]),
+                        data.get("known_intermittent", [])),
+                        name)
         if "message" in data:
             rv += " - %s" % data["message"]
         if "stack" in data:
             rv += self._format_stack(data["stack"])
         return rv
 
     def _format_stack(self, stack):
         return "\n%s\n" % self.color_formatter.dim(stack.strip("\n"))
 
     def _format_suite_summary(self, suite, summary):
         count = summary['counts']
         logs = summary['unexpected_logs']
+        intermittent_logs = summary['intermittent_logs']
 
         rv = [
             "",
             self.color_formatter.sub_heading(suite),
             self.color_formatter.sub_heading(
                 "~" * len(suite))]
 
         # Format check counts
         checks = self.summary.aggregate('count', count)
         rv.append("Ran {} checks ({})".format(sum(checks.values()), ', '.join(
             ['{} {}s'.format(v, k) for k, v in sorted(checks.items()) if v])))
 
         # Format expected counts
         checks = self.summary.aggregate('expected', count, include_skip=False)
-        rv.append("Expected results: {}".format(sum(checks.values())))
+        intermittent_checks = self.summary.aggregate('known_intermittent',
+                                                     count, include_skip=False)
+        intermittents = sum(intermittent_checks.values())
+        known = " ({} known intermittents)".format(intermittents) if intermittents else ""
+        rv.append("Expected results: {}{}".format(sum(checks.values()), known))
 
         # Format skip counts
         skip_tests = count["test"]["expected"]["skip"]
         skip_subtests = count["subtest"]["expected"]["skip"]
         if skip_tests:
             skipped = "Skipped: {} tests".format(skip_tests)
             if skip_subtests:
                 skipped = "{}, {} subtests".format(skipped, skip_subtests)
             rv.append(skipped)
 
         # Format unexpected counts
         checks = self.summary.aggregate('unexpected', count)
         unexpected_count = sum(checks.values())
+        rv.append("Unexpected results: {}".format(unexpected_count))
         if unexpected_count:
-            rv.append("Unexpected results: {}".format(unexpected_count))
             for key in ('test', 'subtest', 'assert'):
                 if not count[key]['unexpected']:
                     continue
                 status_str = ", ".join(["{} {}".format(n, s)
                                         for s, n in sorted(count[key]['unexpected'].items())])
                 rv.append("  {}: {} ({})".format(
                           key, sum(count[key]['unexpected'].values()), status_str))
 
         # Format status
+        if intermittents > 0:
+            heading = "Known Intermittent Results"
+            rv.extend(["", self.color_formatter.heading(heading),
+                       self.color_formatter.heading("-" * len(heading))])
+            if count['subtest']['count']:
+                for test_id, results in intermittent_logs.items():
+                    test = self._get_file_name(test_id)
+                    rv.append(self.color_formatter.bold(test))
+                    for data in results:
+                        rv.append("  %s" % self._format_status(test, data).rstrip())
+            else:
+                for test_id, results in intermittent_logs.items():
+                    test = self._get_file_name(test_id)
+                    assert len(results) == 1
+                    data = results[0]
+                    assert "subtest" not in data
+                    rv.append(self._format_status(test, data).rstrip())
+
         if not any(count[key]["unexpected"] for key in ('test', 'subtest', 'assert')):
             rv.append(self.color_formatter.log_test_status_pass("OK"))
         else:
             heading = "Unexpected Results"
             rv.extend(["", self.color_formatter.heading(heading),
                        self.color_formatter.heading("-" * len(heading))])
             if count['subtest']['count']:
                 for test_id, results in logs.items():
@@ -227,17 +257,18 @@ class MachFormatter(base.BaseFormatter):
 
     def test_start(self, data):
         action = self.color_formatter.action(data['action'].upper())
         return "%s: %s" % (action, self._get_test_id(data))
 
     def test_end(self, data):
         subtests = self._get_subtest_data(data)
 
-        if "expected" in data:
+        if ("expected" in data and
+                data["status"] not in data.get("known_intermittent", [])):
             parent_unexpected = True
             expected_str = ", expected %s" % data["expected"]
         else:
             parent_unexpected = False
             expected_str = ""
 
         has_screenshots = "reftest_screenshots" in data.get("extra", {})
 
@@ -262,16 +293,22 @@ class MachFormatter(base.BaseFormatter):
                     rv += " - %s" % message
                 if "stack" in data:
                     rv += self._format_stack(data["stack"])
             elif not self.verbose:
                 rv += "\n"
                 for d in unexpected:
                     rv += self._format_status(data['test'], d)
 
+        intermittents = self.summary.current["intermittent_logs"].get(data["test"])
+        if intermittents:
+            rv += "\n"
+            for d in intermittents:
+                rv += self._format_status(data['test'], d)
+
         if "expected" not in data and not bool(subtests['unexpected']):
             color = self.color_formatter.log_test_status_pass
         else:
             color = self.color_formatter.log_test_status_unexpected_fail
 
         action = color(data['action'].upper())
         rv = "%s: %s" % (action, rv)
         if has_screenshots and self.enable_screenshot:
@@ -352,18 +389,20 @@ class MachFormatter(base.BaseFormatter):
         test = self._get_test_id(data)
         if test not in self.status_buffer:
             self.status_buffer[test] = {"count": 0, "unexpected": 0, "pass": 0}
         self.status_buffer[test]["count"] += 1
 
         if data["status"] == "PASS":
             self.status_buffer[test]["pass"] += 1
 
-        if 'expected' in data:
+        if ('expected' in data and
+                data["status"] not in data.get("known_intermittent", [])):
             self.status_buffer[test]["unexpected"] += 1
+
         if self.verbose:
             return self._format_status(test, data).rstrip('\n')
 
     def assertion_count(self, data):
         if data["min_expected"] <= data["count"] <= data["max_expected"]:
             return
 
         if data["min_expected"] != data["max_expected"]:
--- a/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
@@ -175,29 +175,34 @@ class TbplFormatter(BaseFormatter):
 
     def _format_status(self, data):
         message = "- " + data["message"] if "message" in data else ""
         if "stack" in data:
             message += "\n%s" % data["stack"]
         if message and message[-1] == "\n":
             message = message[:-1]
 
+        status = data["status"]
+
         if "expected" in data:
-            if not message:
-                message = "- expected %s" % data["expected"]
-            failure_line = "TEST-UNEXPECTED-%s | %s | %s %s\n" % (
-                data["status"], data["test"], data["subtest"],
-                message)
-            if data["expected"] != "PASS":
-                info_line = "TEST-INFO | expected %s\n" % data["expected"]
-                return failure_line + info_line
-            return failure_line
+            if status in data.get("known_intermittent", []):
+                status = "KNOWN-INTERMITTENT-%s" % status
+            else:
+                if not message:
+                    message = "- expected %s" % data["expected"]
+                failure_line = "TEST-UNEXPECTED-%s | %s | %s %s\n" % (
+                    status, data["test"], data["subtest"],
+                    message)
+                if data["expected"] != "PASS":
+                    info_line = "TEST-INFO | expected %s\n" % data["expected"]
+                    return failure_line + info_line
+                return failure_line
 
         return "TEST-%s | %s | %s %s\n" % (
-            data["status"], data["test"], data["subtest"],
+            status, data["test"], data["subtest"],
             message)
 
     def test_end(self, data):
         rv = []
         if self.compact and self.subtests_count:
             print_context = "expected" in data
             rv.append(self._format_subtests(data.get("component"),
                                             subtract_context=print_context))
@@ -221,39 +226,44 @@ class TbplFormatter(BaseFormatter):
                 screenshot_msg = ("\nREFTEST   IMAGE 1 (TEST): data:image/png;base64,%s\n"
                                   "REFTEST   IMAGE 2 (REFERENCE): data:image/png;base64,%s") % (
                                       screenshots[0]["screenshot"],
                                       screenshots[2]["screenshot"])
             elif len(screenshots) == 1:
                 screenshot_msg = ("\nREFTEST   IMAGE: data:image/png;base64,%s" %
                                   screenshots[0]["screenshot"])
 
+        status = data['status']
+
         if "expected" in data:
-            message = data.get("message", "")
-            if not message:
-                message = "expected %s" % data["expected"]
-            if "stack" in data:
-                message += "\n%s" % data["stack"]
-            if message and message[-1] == "\n":
-                message = message[:-1]
-
-            message += screenshot_msg
+            if status in data.get("known_intermittent", []):
+                status = "KNOWN-INTERMITTENT-%s" % status
+            else:
+                message = data.get("message", "")
+                if not message:
+                    message = "expected %s" % data["expected"]
+                if "stack" in data:
+                    message += "\n%s" % data["stack"]
+                if message and message[-1] == "\n":
+                    message = message[:-1]
 
-            failure_line = "TEST-UNEXPECTED-%s | %s | %s\n" % (
-                data["status"], test_id, message)
+                message += screenshot_msg
+
+                failure_line = "TEST-UNEXPECTED-%s | %s | %s\n" % (
+                    data["status"], test_id, message)
 
-            if data["expected"] not in ("PASS", "OK"):
-                expected_msg = "expected %s | " % data["expected"]
-            else:
-                expected_msg = ""
-            info_line = "TEST-INFO %s%s\n" % (expected_msg, duration_msg)
+                if data["expected"] not in ("PASS", "OK"):
+                    expected_msg = "expected %s | " % data["expected"]
+                else:
+                    expected_msg = ""
+                info_line = "TEST-INFO %s%s\n" % (expected_msg, duration_msg)
 
-            return failure_line + info_line
+                return failure_line + info_line
 
-        sections = ["TEST-%s" % data['status'], test_id]
+        sections = ["TEST-%s" % status, test_id]
         if duration_msg:
             sections.append(duration_msg)
         rv.append(' | '.join(sections) + '\n')
         if screenshot_msg:
             rv.append(screenshot_msg[1:] + "\n")
         return "".join(rv)
 
     def suite_end(self, data):
--- a/testing/mozbase/mozlog/mozlog/handlers/summaryhandler.py
+++ b/testing/mozbase/mozlog/mozlog/handlers/summaryhandler.py
@@ -43,44 +43,55 @@ class SummaryHandler(LogHandler):
                 '<check>': {
                   'count': int,
                   'expected': {
                     '<status>': int,
                   },
                   'unexpected': {
                     '<status>': int,
                   },
+                  'known_intermittent': {
+                    '<status>': int,
+                  },
                 },
               },
               'unexpected_logs': {
                 '<test>': [<data>]
+              },
+              'intermittent_logs': {
+                '<test>': [<data>]
               }
             }
 
         Valid values for <check> are `test`, `subtest` and `assert`. Valid
         <status> keys are defined in the :py:mod:`mozlog.logtypes` module.  The
         <test> key is the id as logged by `test_start`. Finally the <data>
         field is the log data from any `test_end` or `test_status` log messages
         that have an unexpected result.
+
+        Mozlog's structuredlog has a `known_intermittent` field indicating
+        whether a `test` or `subtest` <status> is expected to arise
+        intermittently. Known intermittent results are logged both as
+        `expected` and as `known_intermittent`.
         """
         return self.summary[suite]
 
     def __iter__(self):
         """Iterate over summaries.
 
         Yields a tuple of (suite, summary). The summary returned is
         the same format as returned by 'get'.
         """
         for suite, data in six.iteritems(self.summary):
             yield suite, data
 
     @classmethod
     def aggregate(cls, key, counts, include_skip=True):
         """Helper method for aggregating count data by 'key' instead of by 'check'."""
-        assert key in ('count', 'expected', 'unexpected')
+        assert key in ('count', 'expected', 'unexpected', 'known_intermittent')
 
         res = defaultdict(int)
         for check, val in counts.items():
             if key == 'count':
                 res[check] += val[key]
                 continue
 
             for status, num in val[key].items():
@@ -93,55 +104,75 @@ class SummaryHandler(LogHandler):
         self.current_suite = data.get('name', 'suite {}'.format(len(self.summary) + 1))
         if self.current_suite not in self.summary:
             self.summary[self.current_suite] = {
                 'counts': {
                     'test': {
                         'count': 0,
                         'expected': defaultdict(int),
                         'unexpected': defaultdict(int),
+                        'known_intermittent': defaultdict(int),
                     },
                     'subtest': {
                         'count': 0,
                         'expected': defaultdict(int),
                         'unexpected': defaultdict(int),
+                        'known_intermittent': defaultdict(int),
                     },
                     'assert': {
                         'count': 0,
                         'expected': defaultdict(int),
                         'unexpected': defaultdict(int),
+                        'known_intermittent': defaultdict(int),
                     }
                 },
                 'unexpected_logs': OrderedDict(),
+                'intermittent_logs': OrderedDict(),
             }
 
     def test_start(self, data):
         self.current['counts']['test']['count'] += 1
 
     def test_status(self, data):
         logs = self.current['unexpected_logs']
+        intermittent_logs = self.current['intermittent_logs']
         count = self.current['counts']
         count['subtest']['count'] += 1
 
         if 'expected' in data:
-            count['subtest']['unexpected'][data['status'].lower()] += 1
-            if data['test'] not in logs:
-                logs[data['test']] = []
-            logs[data['test']].append(data)
+            if data['status'] not in data.get('known_intermittent', []):
+                count['subtest']['unexpected'][data['status'].lower()] += 1
+                if data['test'] not in logs:
+                    logs[data['test']] = []
+                logs[data['test']].append(data)
+            else:
+                count['subtest']['expected'][data['status'].lower()] += 1
+                count['subtest']['known_intermittent'][data['status'].lower()] += 1
+                if data['test'] not in intermittent_logs:
+                    intermittent_logs[data['test']] = []
+                intermittent_logs[data['test']].append(data)
         else:
             count['subtest']['expected'][data['status'].lower()] += 1
 
     def test_end(self, data):
         logs = self.current['unexpected_logs']
+        intermittent_logs = self.current['intermittent_logs']
         count = self.current['counts']
         if 'expected' in data:
-            count['test']['unexpected'][data['status'].lower()] += 1
-            if data['test'] not in logs:
-                logs[data['test']] = []
-            logs[data['test']].append(data)
+            if data['status'] not in data.get('known_intermittent', []):
+                count['test']['unexpected'][data['status'].lower()] += 1
+                if data['test'] not in logs:
+                    logs[data['test']] = []
+                logs[data['test']].append(data)
+            else:
+                count['test']['expected'][data['status'].lower()] += 1
+                count['test']['known_intermittent'][data['status'].lower()] += 1
+                if data['test'] not in intermittent_logs:
+                    intermittent_logs[data['test']] = []
+                intermittent_logs[data['test']].append(data)
         else:
             count['test']['expected'][data['status'].lower()] += 1
 
     def assertion_count(self, data):
         count = self.current['counts']
         count['assert']['count'] += 1
 
         if data['min_expected'] <= data['count'] <= data['max_expected']:
--- a/testing/mozbase/mozlog/mozlog/structuredlog.py
+++ b/testing/mozbase/mozlog/mozlog/structuredlog.py
@@ -31,23 +31,27 @@ Allowed actions, and subfields:
 
   test_end
       test - ID for the test
       status [PASS | FAIL | OK | ERROR |
               TIMEOUT | CRASH | ASSERT | SKIP] - test status
       expected [As for status] - Status that the test was expected to get,
                                  or absent if the test got the expected status
       extra - Dictionary of harness-specific extra information e.g. debug info
+      known_intermittent - List of known intermittent statuses that should
+                           not fail a test. eg. ['FAIL', 'TIMEOUT']
 
   test_status
       test - ID for the test
       subtest - Name of the subtest
       status [PASS | FAIL | TIMEOUT | NOTRUN | SKIP] - test status
       expected [As for status] - Status that the subtest was expected to get,
                                  or absent if the subtest got the expected status
+      known_intermittent - List of known intermittent statuses that should
+                           not fail a test. eg. ['FAIL', 'TIMEOUT']
 
   process_output
       process - PID of the process
       command - Command line of the process
       data - Output data from the process
 
   assertion_count
       count - Number of assertions produced
@@ -351,17 +355,17 @@ class StructuredLogger(object):
 
     @log_action(TestId("test"),
                 Unicode("subtest"),
                 SubStatus("status"),
                 SubStatus("expected", default="PASS"),
                 Unicode("message", default=None, optional=True),
                 Unicode("stack", default=None, optional=True),
                 Dict(Any, "extra", default=None, optional=True),
-                List(SubStatus, "expected_intermittent", default=None,
+                List(SubStatus, "known_intermittent", default=None,
                      optional=True))
     def test_status(self, data):
         """
         Log a test_status message indicating a subtest result. Tests that
         do not have subtests are not expected to produce test_status messages.
 
         :param test: Identifier of the test that produced the result.
         :param subtest: Name of the subtest.
@@ -384,17 +388,17 @@ class StructuredLogger(object):
         self._log_data("test_status", data)
 
     @log_action(TestId("test"),
                 Status("status"),
                 Status("expected", default="OK"),
                 Unicode("message", default=None, optional=True),
                 Unicode("stack", default=None, optional=True),
                 Dict(Any, "extra", default=None, optional=True),
-                List(Status, "expected_intermittent", default=None,
+                List(Status, "known_intermittent", default=None,
                      optional=True))
     def test_end(self, data):
         """
         Log a test_end message indicating that a test completed. For tests
         with subtests this indicates whether the overall test completed without
         errors. For tests without subtests this indicates the test result
         directly.
 
--- a/testing/mozbase/mozlog/tests/test_formatters.py
+++ b/testing/mozbase/mozlog/tests/test_formatters.py
@@ -30,16 +30,17 @@ FORMATS = {
  0:00.00 TEST_START: test_baz
  0:00.00 TEST_END: FAIL
  0:00.00 SUITE_END
 
 suite 1
 ~~~~~~~
 Ran 4 checks (1 subtests, 3 tests)
 Expected results: 4
+Unexpected results: 0
 OK
 """.lstrip(b'\n')),
         ('mach', {'verbose': True}, b"""
  0:00.00 SUITE_START: running 3 tests
  0:00.00 TEST_START: test_foo
  0:00.00 TEST_END: OK
  0:00.00 TEST_START: test_bar
  0:00.00 PASS a subtest
@@ -47,16 +48,17 @@ OK
  0:00.00 TEST_START: test_baz
  0:00.00 TEST_END: FAIL
  0:00.00 SUITE_END
 
 suite 1
 ~~~~~~~
 Ran 4 checks (1 subtests, 3 tests)
 Expected results: 4
+Unexpected results: 0
 OK
 """.lstrip(b'\n')),
     ],
 
     'FAIL': [
         ('mach', {}, b"""
  0:00.00 SUITE_START: running 3 tests
  0:00.00 TEST_START: test_foo
@@ -121,16 +123,72 @@ test_bar
   FAIL a subtest - expected 0 got 1
     SimpleTest.is@SimpleTest/SimpleTest.js:312:5
     @caps/tests/mochitest/test_bug246699.html:53:1
   TIMEOUT another subtest
 test_baz
   UNEXPECTED-PASS test_baz
 """.lstrip(b'\n')),
     ],
+
+    'KNOWN-INTERMITTENT': [
+        ('mach', {}, b"""
+ 0:00.00 SUITE_START: running 3 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: FAIL
+KNOWN-INTERMITTENT-FAIL test_foo
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 TEST_END: Test OK. Subtests passed 1/1. Unexpected 0
+KNOWN-INTERMITTENT-PASS a subtest
+ 0:00.00 TEST_START: test_baz
+ 0:00.00 TEST_END: FAIL
+ 0:00.00 SUITE_END
+
+suite 1
+~~~~~~~
+Ran 4 checks (1 subtests, 3 tests)
+Expected results: 4 (2 known intermittents)
+Unexpected results: 0
+
+Known Intermittent Results
+--------------------------
+test_foo
+  KNOWN-INTERMITTENT-FAIL test_foo
+test_bar
+  KNOWN-INTERMITTENT-PASS a subtest
+OK
+""".lstrip(b'\n')),
+        ('mach', {'verbose': True}, b"""
+ 0:00.00 SUITE_START: running 3 tests
+ 0:00.00 TEST_START: test_foo
+ 0:00.00 TEST_END: FAIL
+KNOWN-INTERMITTENT-FAIL test_foo
+ 0:00.00 TEST_START: test_bar
+ 0:00.00 KNOWN-INTERMITTENT-PASS a subtest
+ 0:00.00 TEST_END: Test OK. Subtests passed 1/1. Unexpected 0
+KNOWN-INTERMITTENT-PASS a subtest
+ 0:00.00 TEST_START: test_baz
+ 0:00.00 TEST_END: FAIL
+ 0:00.00 SUITE_END
+
+suite 1
+~~~~~~~
+Ran 4 checks (1 subtests, 3 tests)
+Expected results: 4 (2 known intermittents)
+Unexpected results: 0
+
+Known Intermittent Results
+--------------------------
+test_foo
+  KNOWN-INTERMITTENT-FAIL test_foo
+test_bar
+  KNOWN-INTERMITTENT-PASS a subtest
+OK
+""".lstrip(b'\n')),
+    ],
 }
 
 
 def ids(test):
     ids = []
     for value in FORMATS[test]:
         args = ", ".join(["{}={}".format(k, v) for k, v in value[1].items()])
         if args:
@@ -196,11 +254,36 @@ def test_fail(name, opts, expected):
     logger.test_end('test_baz', 'PASS', 'FAIL')
     logger.suite_end()
 
     result = buf.getvalue()
     print("Dumping result for copy/paste:")
     print(result)
     assert result == expected
 
+@pytest.mark.parametrize("name,opts,expected", FORMATS['KNOWN-INTERMITTENT'],
+                         ids=ids('KNOWN-INTERMITTENT'))
+def test_known_intermittent(name, opts, expected):
+    buf = BytesIO()
+    fmt = formatters[name](**opts)
+    logger = StructuredLogger('test_logger')
+    logger.add_handler(StreamHandler(buf, fmt))
+
+    logger.suite_start(['test_foo', 'test_bar', 'test_baz'])
+    logger.test_start('test_foo')
+    logger.test_end('test_foo', 'FAIL', 'PASS', known_intermittent=['FAIL'])
+    logger.test_start('test_bar')
+    logger.test_status('test_bar', 'a subtest', 'PASS', 'FAIL',
+                       known_intermittent=['PASS'])
+    logger.test_end('test_bar', 'OK')
+    logger.test_start('test_baz')
+    logger.test_end('test_baz', 'FAIL', 'FAIL', 'expected 0 got 1',
+                    known_intermittent=['PASS'])
+    logger.suite_end()
+
+    result = buf.getvalue()
+    print("Dumping result for copy/paste:")
+    print(result)
+    assert result == expected
+
 
 if __name__ == '__main__':
     mozunit.main()
--- a/testing/mozbase/mozlog/tests/test_structured.py
+++ b/testing/mozbase/mozlog/tests/test_structured.py
@@ -234,25 +234,26 @@ class TestStructuredLog(BaseStructuredTe
                                 "subtest": "subtest name",
                                 "status": "FAIL",
                                 "expected": "PASS",
                                 "test": "test1",
                                 "stack": "many\nlines\nof\nstack"})
         self.logger.test_end("test1", "OK")
         self.logger.suite_end()
 
-    def test_status_expected_intermittent(self):
+    def test_status_known_intermittent(self):
         self.logger.suite_start([])
         self.logger.test_start("test1")
-        self.logger.test_status("test1", "subtest name", "fail", expected_intermittent=["FAIL"])
+        self.logger.test_status("test1", "subtest name", "fail",
+                                known_intermittent=["FAIL"])
         self.assert_log_equals({"action": "test_status",
                                 "subtest": "subtest name",
                                 "status": "FAIL",
                                 "expected": "PASS",
-                                "expected_intermittent": ["FAIL"],
+                                "known_intermittent": ["FAIL"],
                                 "test": "test1"})
         self.logger.test_end("test1", "OK")
         self.logger.suite_end()
 
     def test_status_not_started(self):
         self.logger.test_status("test_UNKNOWN", "subtest", "PASS")
         self.assertTrue(self.pop_last_item()["message"].startswith(
             "test_status for test_UNKNOWN logged while not in progress. Logged with data: {"))
@@ -869,17 +870,17 @@ Unexpected results: 3
                                 status="PASS", expected="PASS")
         self.logger.test_end("test2", status="OK", expected="OK")
 
         self.set_position()
         self.logger.suite_end()
 
         self.assertIn("OK", self.loglines)
         self.assertIn("Expected results: 5", self.loglines)
-        self.assertNotIn("Unexpected results: 0", self.loglines)
+        self.assertIn("Unexpected results: 0", self.loglines)
 
     def test_process_start(self):
         self.logger.process_start(1234)
         self.assertIn("Started process `1234`", self.loglines[0])
 
     def test_process_start_with_command(self):
         self.logger.process_start(1234, command='test cmd')
         self.assertIn("Started process `1234` (test cmd)", self.loglines[0])