Bug 1321127 - Add an assertion_count action to mozlog, r=ahal
author: James Graham <james@hoppipolla.co.uk>
date: Wed, 30 Nov 2016 17:26:16 +0000
changeset 370405 2be3e937ecd92ed4cd0404cfaa71fa451805e8fd
parent 370404 c6face1d7183eea175fd7b00e576d3312f3fd14c
child 370406 48ce71d365d86d41375eceb7bfc9e29a0c8c5fb1
push id: 6996
push user: jlorenzo@mozilla.com
push date: Mon, 06 Mar 2017 20:48:21 +0000
treeherder: mozilla-beta@d89512dab048
reviewers: ahal
bugs: 1321127
milestone: 53.0a1
first release with: nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
last release without: nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
Bug 1321127 - Add an assertion_count action to mozlog, r=ahal This is intended as a structured replacement for the assertion checks that previously used unstructured logs. It adds a log action, assertion_count, which takes the actual number of assertions observed during a test, the minimum expected number, and the maximum expected number. It also updates the reftest harness to use this logging. MozReview-Commit-ID: JgjLlaYuvSG
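
For illustration, a single assertion_count message as it would appear in the raw structured log might look like the following (the test id and values are invented; test, count, min_expected and max_expected are the fields this patch defines, while action, time, thread, pid and source are the standard subfields mozlog attaches to every message):

    {"action": "assertion_count", "test": "layout/reftests/example.html", "count": 2, "min_expected": 0, "max_expected": 2, "time": 1480526776000, "thread": "main", "pid": 1234, "source": "reftest"}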
layout/tools/reftest/reftest.jsm
testing/modules/StructuredLog.jsm
testing/mozbase/mozlog/mozlog/formatters/machformatter.py
testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
testing/mozbase/mozlog/mozlog/formatters/unittest.py
testing/mozbase/mozlog/mozlog/handlers/statushandler.py
testing/mozbase/mozlog/mozlog/structuredlog.py
--- a/layout/tools/reftest/reftest.jsm
+++ b/layout/tools/reftest/reftest.jsm
@@ -1845,38 +1845,17 @@ function DoAssertionCheck(numAsserts)
             gAssertionCount = newAssertionCount;
 
             numAsserts += numLocalAsserts;
         }
 
         var minAsserts = gURLs[0].minAsserts;
         var maxAsserts = gURLs[0].maxAsserts;
 
-        var expectedAssertions = "expected " + minAsserts;
-        if (minAsserts != maxAsserts) {
-            expectedAssertions += " to " + maxAsserts;
-        }
-        expectedAssertions += " assertions";
-
-        if (numAsserts < minAsserts) {
-            ++gTestResults.AssertionUnexpectedFixed;
-            gDumpFn("REFTEST TEST-UNEXPECTED-PASS | " + gURLs[0].prettyPath +
-                    " | assertion count " + numAsserts + " is less than " +
-                       expectedAssertions + "\n");
-        } else if (numAsserts > maxAsserts) {
-            ++gTestResults.AssertionUnexpected;
-            gDumpFn("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath +
-                    " | assertion count " + numAsserts + " is more than " +
-                       expectedAssertions + "\n");
-        } else if (numAsserts != 0) {
-            ++gTestResults.AssertionKnown;
-            gDumpFn("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath +
-                    "assertion count " + numAsserts + " matches " +
-                    expectedAssertions + "\n");
-        }
+        logger.assertionCount(gCurrentURL, numAsserts, minAsserts, maxAsserts);
     }
 
     if (gURLs[0].chaosMode) {
         gWindowUtils.leaveChaosMode();
     }
 
     // And start the next test.
     gURLs.shift();
--- a/testing/modules/StructuredLog.jsm
+++ b/testing/modules/StructuredLog.jsm
@@ -82,16 +82,25 @@ StructuredLogger.prototype = {
     }
     if (extra !== null) {
       data.extra = extra;
     }
 
     this._logData("test_end", data);
   },
 
+  assertionCount: function (test, count, minExpected=0, maxExpected=0) {
+    var data = {test: test,
+                min_expected: minExpected,
+                max_expected: maxExpected,
+                count: count};
+
+    this._logData("assertion_count", data);
+  },
+
   suiteStart: function (tests, runinfo=null, versioninfo=null, deviceinfo=null, extra=null) {
     var data = {tests: tests.map(x => this._testId(x))};
     if (runinfo !== null) {
       data.runinfo = runinfo;
     }
 
     if (versioninfo !== null) {
       data.versioninfo = versioninfo;
--- a/testing/mozbase/mozlog/mozlog/formatters/machformatter.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/machformatter.py
@@ -49,16 +49,17 @@ class MachFormatter(base.BaseFormatter):
         self.has_unexpected = {}
         self.last_time = None
         self.terminal = terminal
         self.verbose = False
         self._known_pids = set()
 
         self.summary_values = {"tests": 0,
                                "subtests": 0,
+                               "assertion_counts": 0,
                                "expected": 0,
                                "unexpected": defaultdict(int),
                                "skipped": 0}
         self.summary_unexpected = []
 
     def __call__(self, data):
         s = base.BaseFormatter.__call__(self, data)
         if s is None:
@@ -82,16 +83,20 @@ class MachFormatter(base.BaseFormatter):
                     color = self.terminal.green
                 else:
                     color = self.terminal.red
             elif data["action"] in ("suite_start", "suite_end",
                                     "test_start", "test_status"):
                 color = self.terminal.yellow
             elif data["action"] == "crash":
                 color = self.terminal.red
+            elif data["action"] == "assertion_count":
+                if (data["count"] > data["max_expected"] or
+                    data["count"] < data["min_expected"]):
+                    color = self.terminal.red
 
             if color is not None:
                 action = color(action)
 
         return "%s %s: %s %s\n" % (time, action, thread, s)
 
     def _get_test_id(self, data):
         test_id = data.get("test")
@@ -106,37 +111,45 @@ class MachFormatter(base.BaseFormatter):
         if isinstance(test_id, tuple):
             return "".join(test_id)
 
         assert False, "unexpected test_id"
 
     def suite_start(self, data):
         self.summary_values = {"tests": 0,
                                "subtests": 0,
+                               "assertion_counts": 0,
                                "expected": 0,
                                "unexpected": defaultdict(int),
                                "skipped": 0}
         self.summary_unexpected = []
         return "%i" % len(data["tests"])
 
     def suite_end(self, data):
         term = self.terminal if self.terminal is not None else NullTerminal()
 
         heading = "Summary"
         rv = ["", heading, "=" * len(heading), ""]
 
         has_subtests = self.summary_values["subtests"] > 0
+        has_assert_counts = self.summary_values["assertion_counts"] > 0
 
+        test_count = self.summary_values["tests"]
+        components = ["%i parents" % self.summary_values["tests"]]
         if has_subtests:
-            rv.append("Ran %i tests (%i parents, %i subtests)" %
-                      (self.summary_values["tests"] + self.summary_values["subtests"],
-                       self.summary_values["tests"],
-                       self.summary_values["subtests"]))
-        else:
-            rv.append("Ran %i tests" % self.summary_values["tests"])
+            test_count += self.summary_values["subtests"]
+            components.append("%i subtests" % self.summary_values["subtests"])
+        if has_assert_counts:
+            test_count += self.summary_values["assertion_counts"]
+            components.append("%i assertion counts" % self.summary_values["assertion_counts"])
+
+        summary = "Ran %i tests" % test_count
+        if len(components) > 1:
+            summary += " (%s)" % ", ".join(components)
+        rv.append(summary)
 
         rv.append("Expected results: %i" % self.summary_values["expected"])
 
         unexpected_count = sum(self.summary_values["unexpected"].values())
         if unexpected_count > 0:
             unexpected_str = " (%s)" % ", ".join("%s: %i" % (key, value) for key, value in
                                                  sorted(self.summary_values["unexpected"].items()))
         else:
@@ -274,16 +287,34 @@ class MachFormatter(base.BaseFormatter):
             # We only append an unexpected summary if it was not logged
             # directly by verbose mode.
             self.status_buffer[test]["unexpected"].append((subtest,
                                                            status,
                                                            data["expected"],
                                                            message))
         return rv
 
+    def assertion_count(self, data):
+        self.summary_values["assertion_counts"] += 1
+        if data["min_expected"] != data["max_expected"]:
+            expected = "%i to %i" % (data["min_expected"],
+                                     data["max_expected"])
+        else:
+            expected = "%i" % data["min_expected"]
+
+        if data["min_expected"] <= data["count"] <= data["max_expected"]:
+            return
+        elif data["max_expected"] < data["count"]:
+            status = "FAIL"
+        else:
+            status = "PASS"
+
+        self.summary_values["unexpected"][status] += 1
+        return "Assertion count %i, expected %s assertions\n" % (data["count"], expected)
+
     def _update_summary(self, data):
         if "expected" in data:
             self.summary_values["unexpected"][data["status"]] += 1
         elif data["status"] == "SKIP":
             self.summary_values["skipped"] += 1
         else:
             self.summary_values["expected"] += 1
 
--- a/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
@@ -134,16 +134,35 @@ class TbplFormatter(BaseFormatter):
                 self.buffer.clear()
                 return "".join(rv)
             else:
                 self.subtests_count += 1
                 self.buffer.append(data)
         else:
             return self._format_status(data)
 
+    def assertion_count(self, data):
+        if data["min_expected"] != data["max_expected"]:
+            expected = "%i to %i" % (data["min_expected"],
+                                     data["max_expected"])
+        else:
+            expected = "%i" % data["min_expected"]
+
+        if data["count"] < data["min_expected"]:
+            status, comparison = "TEST-UNEXPECTED-PASS", "is less than"
+        elif data["count"] > data["max_expected"]:
+            status, comparison = "TEST-UNEXPECTED-FAIL", "is more than"
+        elif data["count"] > 0:
+            status, comparison = "TEST-KNOWN-FAIL", "matches"
+        else:
+            return
+
+        return ("%s | %s | assertion count %i %s expected %s assertions\n" %
+                (status, data["test"], data["count"], comparison, expected))
+
     def _format_status(self, data):
         message = "- " + data["message"] if "message" in data else ""
         if "stack" in data:
             message += "\n%s" % data["stack"]
         if message and message[-1] == "\n":
             message = message[:-1]
 
         if "expected" in data:
--- a/testing/mozbase/mozlog/mozlog/formatters/unittest.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/unittest.py
@@ -35,16 +35,31 @@ class UnittestFormatter(base.BaseFormatt
                 self.fails.append(data)
             elif status == "ERROR":
                 self.errors.append(data)
 
         elif data["status"] == "SKIP":
             char = "S"
         return char
 
+    def assertion_count(self, data):
+        if data["count"] < data["min_expected"]:
+            char = "X"
+        elif data["count"] > data["max_expected"]:
+            char = "F"
+            self.fails.append({"test": data["test"],
+                               "message": ("assertion count %i is greater than %i" %
+                                           (data["count"], data["max_expected"]))})
+        elif data["count"] > 0:
+            char = "."
+        else:
+            char = "."
+
+        return char
+
     def suite_end(self, data):
         self.end_time = data["time"]
         summary = "\n".join([self.output_fails(),
                              self.output_errors(),
                              self.output_summary()])
         return "\n%s\n" % summary
 
     def output_fails(self):
--- a/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
+++ b/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
@@ -38,15 +38,25 @@ class StatusHandler(object):
 
         if action in ('test_status', 'test_end'):
             status = data['status']
             if 'expected' in data:
                 self.unexpected_statuses[status] += 1
             else:
                 self.expected_statuses[status] += 1
 
+        if action == "assertion_count":
+            if data["count"] < data["min_expected"]:
+                self.unexpected_statuses["PASS"] += 1
+            elif data["count"] > data["max_expected"]:
+                self.unexpected_statuses["FAIL"] += 1
+            elif data["count"]:
+                self.expected_statuses["FAIL"] += 1
+            else:
+                self.expected_statuses["PASS"] += 1
+
     def summarize(self):
         return RunSummary(
             dict(self.unexpected_statuses),
             dict(self.expected_statuses),
             dict(self.log_level_counts),
             dict(self.action_counts),
         )
--- a/testing/mozbase/mozlog/mozlog/structuredlog.py
+++ b/testing/mozbase/mozlog/mozlog/structuredlog.py
@@ -41,16 +41,22 @@ Allowed actions, and subfields:
       expected [As for status] - Status that the subtest was expected to get,
                                  or absent if the subtest got the expected status
 
   process_output
       process - PID of the process
       command - Command line of the process
       data - Output data from the process
 
+  assertion_count
+      test - ID of the test the assertion count applies to
+      count - Number of assertions produced
+      min_expected - Minimum expected number of assertions
+      max_expected - Maximum expected number of assertions
+
   log
       level [CRITICAL | ERROR | WARNING |
              INFO | DEBUG] - level of the logging message
       message - Message to log
 
 Subfields for all messages:
       action - the action type of the current message
       time - the timestamp in ms since the epoch of the log message
@@ -417,16 +422,30 @@ class StructuredLogger(object):
         :param process: A unique identifier for the process producing the
                         output (typically the pid)
         :param exitcode: the exit code
         :param command: A string representing the full command line used to
                         start the process.
         """
         self._log_data("process_exit", data)
 
+    @log_action(TestId("test"),
+                Int("count"),
+                Int("min_expected"),
+                Int("max_expected"))
+    def assertion_count(self, data):
+        """Log count of assertions produced when running a test.
+
+        :param test: Id of the test the assertion count applies to
+        :param count: Number of assertions produced
+        :param min_expected: Minimum expected number of assertions
+        :param max_expected: Maximum expected number of assertions
+        """
+        self._log_data("assertion_count", data)
+
 
 def _log_func(level_name):
     @log_action(Unicode("message"),
                 Any("exc_info", default=False))
     def log(self, data):
         exc_info = data.pop("exc_info", None)
         if exc_info:
             if not isinstance(exc_info, tuple):
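
As a usage sketch, the following Python drives the new action end to end; the test id and counts are invented, while StructuredLogger, StreamHandler, JSONFormatter and StatusHandler are existing mozlog classes:

    import sys

    from mozlog.formatters import JSONFormatter
    from mozlog.handlers import StatusHandler, StreamHandler
    from mozlog.structuredlog import StructuredLogger

    logger = StructuredLogger("reftest")
    status = StatusHandler()
    # Emit raw JSON records to stdout and accumulate a run summary.
    logger.add_handler(StreamHandler(sys.stdout, JSONFormatter()))
    logger.add_handler(status)

    logger.suite_start(["layout/reftests/example.html"])
    logger.test_start("layout/reftests/example.html")
    # One assertion observed where exactly one was expected: recorded by
    # StatusHandler as an expected FAIL (a known assertion failure).
    logger.assertion_count("layout/reftests/example.html", count=1,
                           min_expected=1, max_expected=1)
    logger.test_end("layout/reftests/example.html", status="PASS")
    logger.suite_end()

    print(status.summarize())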