Bug 1566017 - Improve 'mach test-info' run type handling; r=jmaher
author Geoff Brown <gbrown@mozilla.com>
Wed, 20 May 2020 20:29:08 +0000
changeset 531320 2e77f3c6d7f105099f14646e3e30ccde8e7adc19
parent 531319 566d54746a9a4e886039ebcc9da6dd1b78e2ec6b
child 531321 0855a5f132b36b12b78c87f7211d07b731fc431c
push id 37437
push user nerli@mozilla.com
push date Thu, 21 May 2020 02:34:41 +0000
treeherder mozilla-central@3d91ba9e1d25
reviewers jmaher
bugs 1566017
milestone 78.0a1
Bug 1566017 - Improve 'mach test-info' run type handling; r=jmaher

- implement ekyle's tip for better grouping of run types
- combine build and run types and eliminate duplicates when building the platform name
- report '1proc' instead of 'e10s' so platform names look more similar to treeherder

The universal_newlines change is a ride-along, for improved py3 compat.

Differential Revision: https://phabricator.services.mozilla.com/D76211
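
For illustration, here is a minimal standalone sketch of the label-building behaviour the patch gives get_platform(): build and run types are merged and de-duplicated, 'chunked' and the default 'e10s' are dropped, '1proc' is reported when e10s is absent, and words already present in the build platform are not repeated. The sample record, the helper name build_platform_label, and the sorted ordering are illustrative assumptions, not part of the patch itself.

    # Hypothetical sketch of the combine-and-dedupe logic; not the patched module.
    def build_platform_label(record):
        platform = record.get('build', {}).get('platform', '-')
        platform_words = platform.split('-')

        run_types = record.get('run', {}).get('type', [])
        run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = record.get('build', {}).get('type', [])
        build_types = build_types if isinstance(build_types, list) else [build_types]

        # combine run and build types, eliminating duplicates
        types = set(run_types) | set(build_types)
        # '1proc' is used as a treeherder-style label when e10s is absent
        if 'e10s' not in types:
            types.add('1proc')

        # drop 'chunked', the implied default 'e10s', and anything already
        # part of the build platform name (sorted here only for stable output)
        kept = [t for t in sorted(types)
                if t not in ('chunked', 'e10s') and t not in platform_words]
        return "%s/%s:" % (platform, "-".join(kept))

    if __name__ == "__main__":
        record = {
            'build': {'platform': 'linux64', 'type': ['opt']},
            'run': {'type': ['chunked', 'fis', 'e10s']},
        }
        print(build_platform_label(record))  # -> linux64/fis-opt:
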
testing/testinfo.py
--- a/testing/testinfo.py
+++ b/testing/testinfo.py
@@ -107,17 +107,17 @@ class TestInfoTests(TestInfo):
     def find_in_hg_or_git(self, test_name):
         if self._hg:
             cmd = [self._hg, 'files', '-I', test_name]
         elif self._git:
             cmd = [self._git, 'ls-files', test_name]
         else:
             return None
         try:
-            out = subprocess.check_output(cmd).splitlines()
+            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
         except subprocess.CalledProcessError:
             out = None
         return out
 
     def set_test_name(self):
         # Generating a unified report for a specific test is complicated
         # by differences in the test name used in various data sources.
         # Consider:
@@ -225,49 +225,58 @@ class TestInfoTests(TestInfo):
         if self.activedata_test_name:
             print("Found records matching '%s' in ActiveData." %
                   self.activedata_test_name)
         else:
             print("Unable to find matching records in ActiveData; using %s!" %
                   self.test_name)
             self.activedata_test_name = self.test_name
 
-    def get_run_types(self, record):
-        types_label = ""
-        if 'run' in record and 'type' in record['run']:
-            run_types = record['run']['type']
-            run_types = run_types if isinstance(run_types, list) else [run_types]
-            fission = True if 'fis' in run_types else False
-            for run_type in run_types:
-                # chunked is not interesting
-                if run_type == 'chunked':
-                    continue
-                # fission implies e10s
-                if fission and run_type == 'e10s':
-                    continue
-                types_label += "-" + run_type
-        return types_label
-
     def get_platform(self, record):
         if 'platform' in record['build']:
             platform = record['build']['platform']
         else:
             platform = "-"
-        tp = record['build']['type']
-        if type(tp) is list:
-            tp = "-".join(tp)
-        return "%s/%s%s:" % (platform, tp, self.get_run_types(record))
+        platform_words = platform.split('-')
+        types_label = ""
+        # combine run and build types and eliminate duplicates
+        run_types = []
+        if 'run' in record and 'type' in record['run']:
+            run_types = record['run']['type']
+            run_types = run_types if isinstance(run_types, list) else [run_types]
+        build_types = []
+        if 'build' in record and 'type' in record['build']:
+            build_types = record['build']['type']
+            build_types = build_types if isinstance(build_types, list) else [build_types]
+        run_types = list(set(run_types+build_types))
+        # '1proc' is used as a treeherder label but does not appear in run types
+        if 'e10s' not in run_types:
+            run_types = run_types + ['1proc']
+        for run_type in run_types:
+            # chunked is not interesting
+            if run_type == 'chunked':
+                continue
+            # e10s is the default: implied
+            if run_type == 'e10s':
+                continue
+            # sometimes a build/run type is already present in the build platform
+            if run_type in platform_words:
+                continue
+            if types_label:
+                types_label += "-"
+            types_label += run_type
+        return "%s/%s:" % (platform, types_label)
 
     def report_test_results(self):
         # Report test pass/fail summary from ActiveData
         query = {
             "from": "unittest",
             "format": "list",
             "limit": 100,
-            "groupby": ["build.platform", "build.type", "run.type"],
+            "groupby": ["build.platform", "build.type"],
             "select": [
                 {"aggregate": "count"},
                 {
                     "name": "failures",
                     "value": {"case": [
                         {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                     ]},
                     "aggregate": "sum",
@@ -275,17 +284,18 @@ class TestInfoTests(TestInfo):
                 },
                 {
                     "name": "skips",
                     "value": {"case": [
                         {"when": {"eq": {"result.status": "SKIP"}}, "then": 1}
                     ]},
                     "aggregate": "sum",
                     "default": 0
-                }
+                },
+                {"value": "run.type", "aggregate": "union"}
             ],
             "where": {"and": [
                 {"eq": {"result.test": self.activedata_test_name}},
                 {"in": {"build.branch": self.branches.split(',')}},
                 {"gt": {"run.timestamp": {"date": self.start}}},
                 {"lt": {"run.timestamp": {"date": self.end}}}
             ]}
         }
@@ -324,23 +334,24 @@ class TestInfoTests(TestInfo):
             print("No test result data found.")
 
     def report_test_durations(self):
         # Report test durations summary from ActiveData
         query = {
             "from": "unittest",
             "format": "list",
             "limit": 100,
-            "groupby": ["build.platform", "build.type", "run.type"],
+            "groupby": ["build.platform", "build.type"],
             "select": [
                 {"value": "result.duration",
                     "aggregate": "average", "name": "average"},
                 {"value": "result.duration", "aggregate": "min", "name": "min"},
                 {"value": "result.duration", "aggregate": "max", "name": "max"},
-                {"aggregate": "count"}
+                {"aggregate": "count"},
+                {"value": "run.type", "aggregate": "union"}
             ],
             "where": {"and": [
                 {"eq": {"result.ok": "T"}},
                 {"eq": {"result.test": self.activedata_test_name}},
                 {"in": {"build.branch": self.branches.split(',')}},
                 {"gt": {"run.timestamp": {"date": self.start}}},
                 {"lt": {"run.timestamp": {"date": self.end}}}
             ]}