Bug 1520130 - [raptor] Improve Raptor "no raptor test results were found" message, r=rwood.
author: Bob Clary <bclary@bclary.com>
Thu, 31 Jan 2019 20:35:56 -0800
changeset 456364 9287ec4e4115073524a29cb04e8417aeac72dbf1
parent 456351 3bf0b7e7e89a005cbe843681384d49227f83a287
child 456365 cc31e621f479b1d45d21df05030ec73ea6254884
push id: unknown
push user: unknown
push date: unknown
reviewers: rwood
bugs: 1520130
milestone: 67.0a1
testing/raptor/raptor/output.py
testing/raptor/raptor/raptor.py
testing/raptor/raptor/results.py
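
Summary of the change: the list of scheduled test names built in main() (raptor_test_names) is now threaded through Raptor.process_results() and RaptorResultsHandler.summarize_and_output() into Output.summarize(), Output.output() and Output.output_supporting_data(), so each "no results" error names the tests that produced nothing. Below is a minimal, self-contained sketch of the resulting behaviour; the FakeLog stub and the sample test names are illustrative stand-ins, not Raptor code.

    # Illustrative sketch only: shows the shape of the improved error message.
    class FakeLog(object):
        def error(self, msg):
            print("ERROR: %s" % msg)

    LOG = FakeLog()

    def summarize(results, test_names):
        # With no results at all, include the scheduled test names so the
        # failure line identifies which tests produced nothing.
        if len(results) == 0:
            LOG.error("error: no raptor test results found for %s" %
                      ', '.join(test_names))
            return None
        return {'framework': {'name': 'raptor'}, 'suites': results}

    summarize([], ['raptor-tp6-amazon-firefox', 'raptor-tp6-google-firefox'])
    # ERROR: error: no raptor test results found for raptor-tp6-amazon-firefox, raptor-tp6-google-firefox
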
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -27,28 +27,29 @@ class Output(object):
         """
         self.results = results
         self.summarized_results = {}
         self.supporting_data = supporting_data
         self.summarized_supporting_data = []
         self.summarized_screenshots = []
         self.subtest_alert_on = subtest_alert_on
 
-    def summarize(self):
+    def summarize(self, test_names):
         suites = []
         test_results = {
             'framework': {
                 'name': 'raptor',
             },
             'suites': suites,
         }
 
         # check if we actually have any results
         if len(self.results) == 0:
-            LOG.error("error: no raptor test results found!")
+            LOG.error("error: no raptor test results found for %s" %
+                      ', '.join(test_names))
             return
 
         for test in self.results:
             vals = []
             subtests = []
             suite = {
                 'name': test.name,
                 'type': test.type,
@@ -129,17 +130,18 @@ class Output(object):
                     subtests, vals = self.parseAssortedDomOutput(test)
                 elif 'wasm-misc' in test.measurements:
                     subtests, vals = self.parseWASMMiscOutput(test)
                 elif 'wasm-godot' in test.measurements:
                     subtests, vals = self.parseWASMGodotOutput(test)
                 suite['subtests'] = subtests
 
             else:
-                LOG.error("output.summarize received unsupported test results type")
+                LOG.error("output.summarize received unsupported test results type for %s" %
+                          test.name)
                 return
 
             # for benchmarks there is generally  more than one subtest in each cycle
             # and a benchmark-specific formula is needed to calculate the final score
 
             # for pageload tests, if there are > 1 subtests here, that means there
             # were multiple measurements captured in each single pageload; we want
             # to get the mean of those values and report 1 overall 'suite' value
@@ -597,20 +599,21 @@ class Output(object):
             </tr>""" % (screenshot['test_name'],
                         screenshot['page_cycle'],
                         screenshot['screenshot'],
                         screenshot['test_name'],
                         screenshot['page_cycle']))
 
         self.summarized_screenshots.append("""</table></body> </html>""")
 
-    def output(self):
+    def output(self, test_names):
         """output to file and perfherder data json """
         if self.summarized_results == {}:
-            LOG.error("error: no summarized raptor results found!")
+            LOG.error("error: no summarized raptor results found for %s" %
+                      ', '.join(test_names))
             return False
 
         if os.environ['MOZ_UPLOAD_DIR']:
             # i.e. testing/mozharness/build/raptor.json locally; in production it will
             # be at /tasks/task_*/build/ (where it will be picked up by mozharness later
             # and made into a tc artifact accessible in treeherder as perfherder-data.json)
             results_path = os.path.join(os.path.dirname(os.environ['MOZ_UPLOAD_DIR']),
                                         'raptor.json')
@@ -645,28 +648,29 @@ class Output(object):
             LOG.info("gecko profiling enabled - not posting results for perfherder")
 
         json.dump(self.summarized_results, open(results_path, 'w'), indent=2,
                   sort_keys=True)
         LOG.info("results can also be found locally at: %s" % results_path)
 
         return True
 
-    def output_supporting_data(self):
+    def output_supporting_data(self, test_names):
         '''
         Supporting data was gathered outside of the main raptor test; it has already
         been summarized, now output it appropriately.
 
         We want to output supporting data in a completely separate perfherder json blob and
         in a corresponding file artifact. This way supporting data can be ingested as its own
         test suite in perfherder and alerted upon if desired. Kept outside of the test results
         from the actual Raptor test that was run when the supporting data was gathered.
         '''
         if len(self.summarized_supporting_data) == 0:
-            LOG.error("error: no summarized supporting data found!")
+            LOG.error("error: no summarized supporting data found for %s" %
+                      ', '.join(test_names))
             return False
 
         for next_data_set in self.summarized_supporting_data:
             data_type = next_data_set['suites'][0]['type']
 
             if os.environ['MOZ_UPLOAD_DIR']:
                 # i.e. testing/mozharness/build/raptor.json locally; in production it will
                 # be at /tasks/task_*/build/ (where it will be picked up by mozharness later
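
Note on the output_supporting_data() hunk above: supporting data (e.g. power measurements) is summarized and emitted as its own perfherder blob and file artifact per data type, kept separate from the Raptor test's own results so it can be ingested and alerted on independently. A rough sketch of that shape follows, assuming each summarized data set carries its type in suites[0]['type'] as in the hunk above; the raptor-<type>.json file naming is illustrative, not necessarily what Raptor writes.

    # Rough sketch, not the actual Output method: one separate JSON artifact
    # per supporting data type (file naming here is illustrative).
    import json
    import os

    def dump_supporting_data(summarized_supporting_data, upload_dir):
        for data_set in summarized_supporting_data:
            data_type = data_set['suites'][0]['type']
            results_path = os.path.join(upload_dir, 'raptor-%s.json' % data_type)
            with open(results_path, 'w') as f:
                json.dump(data_set, f, indent=2, sort_keys=True)
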
--- a/testing/raptor/raptor/raptor.py
+++ b/testing/raptor/raptor/raptor.py
@@ -459,30 +459,30 @@ class Raptor(object):
         upload_dir = os.getenv('MOZ_UPLOAD_DIR')
         if not upload_dir:
             self.log.critical("Profiling ignored because MOZ_UPLOAD_DIR was not set")
         else:
             self.gecko_profiler = GeckoProfile(upload_dir,
                                                self.config,
                                                test)
 
-    def process_results(self):
+    def process_results(self, test_names):
         # when running locally output results in build/raptor.json; when running
         # in production output to a local.json to be turned into tc job artifact
         if self.config.get('run_local', False):
             if 'MOZ_DEVELOPER_REPO_DIR' in os.environ:
                 raptor_json_path = os.path.join(os.environ['MOZ_DEVELOPER_REPO_DIR'],
                                                 'testing', 'mozharness', 'build', 'raptor.json')
             else:
                 raptor_json_path = os.path.join(here, 'raptor.json')
         else:
             raptor_json_path = os.path.join(os.getcwd(), 'local.json')
 
         self.config['raptor_json_path'] = raptor_json_path
-        return self.results_handler.summarize_and_output(self.config)
+        return self.results_handler.summarize_and_output(self.config, test_names)
 
     def get_page_timeout_list(self):
         return self.results_handler.page_timeout_list
 
     def check_for_crashes(self):
         if self.config['app'] in ["geckoview", "fennec"]:
             logcat = self.device.get_logcat()
             if logcat:
@@ -584,16 +584,17 @@ def main(args=sys.argv[1:]):
     if args.debug_mode:
         LOG.info("debug-mode enabled")
 
     LOG.info("received command line arguments: %s" % str(args))
 
     # if a test name is specified on the command line, and it exists, just run that one
     # otherwise run all available raptor tests that are found for this browser
     raptor_test_list = get_raptor_test_list(args, mozinfo.os)
+    raptor_test_names = [raptor_test['name'] for raptor_test in raptor_test_list]
 
     # ensure we have at least one valid test to run
     if len(raptor_test_list) == 0:
         LOG.critical("abort: no tests found")
         sys.exit(1)
 
     LOG.info("raptor tests scheduled to run:")
     for next_test in raptor_test_list:
@@ -617,22 +618,23 @@ def main(args=sys.argv[1:]):
     for next_test in raptor_test_list:
         if 'page_timeout' not in next_test.keys():
             next_test['page_timeout'] = 120000
         if 'page_cycles' not in next_test.keys():
             next_test['page_cycles'] = 1
 
         raptor.run_test(next_test, timeout=int(next_test['page_timeout']))
 
-    success = raptor.process_results()
+    success = raptor.process_results(raptor_test_names)
     raptor.clean_up()
 
     if not success:
         # didn't get test results; the test timed out or crashed, etc., so we want the job to fail
-        LOG.critical("TEST-UNEXPECTED-FAIL: no raptor test results were found")
+        LOG.critical("TEST-UNEXPECTED-FAIL: no raptor test results were found for %s" %
+                     ', '.join(raptor_test_names))
         os.sys.exit(1)
 
     # if we have results but one test page timed out (i.e. one tp6 test page didn't load
     but others did) we still dumped PERFHERDER_DATA for the successful pages but we
     want the overall test job to be marked as a failure
     pages_that_timed_out = raptor.get_page_timeout_list()
     if len(pages_that_timed_out) > 0:
         for _page in pages_that_timed_out:
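
On the page-timeout handling at the end of the hunk above: even when perfherder data was produced for the pages that did load, any page that timed out should still fail the overall job. A minimal illustration of that logic follows; the dict keys and message text are assumptions for the sketch, not Raptor's exact fields.

    # Illustrative only: fail the job if any individual page timed out,
    # even though results for the other pages were already output.
    def check_page_timeouts(pages_that_timed_out, log):
        for page in pages_that_timed_out:
            log.critical("TEST-UNEXPECTED-FAIL: test '%s' timed out loading '%s'"
                         % (page.get('test_name'), page.get('url')))
        return len(pages_that_timed_out) == 0
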
--- a/testing/raptor/raptor/results.py
+++ b/testing/raptor/raptor/results.py
@@ -61,27 +61,27 @@ class RaptorResultsHandler():
                                'proportional': proportional}}
         '''
         LOG.info("RaptorResultsHandler.add_supporting_data received %s data"
                  % supporting_data['type'])
         if self.supporting_data is None:
             self.supporting_data = []
         self.supporting_data.append(supporting_data)
 
-    def summarize_and_output(self, test_config):
+    def summarize_and_output(self, test_config, test_names):
         # summarize the result data, write to file and output PERFHERDER_DATA
         LOG.info("summarizing raptor test results")
         output = Output(self.results, self.supporting_data, test_config['subtest_alert_on'])
-        output.summarize()
+        output.summarize(test_names)
         output.summarize_screenshots(self.images)
         # only dump out supporting data (i.e. power) if actual Raptor test completed
         if self.supporting_data is not None and len(self.results) != 0:
             output.summarize_supporting_data()
-            output.output_supporting_data()
-        return output.output()
+            output.output_supporting_data(test_names)
+        return output.output(test_names)
 
 
 class RaptorTestResult():
     """Single Raptor test result class"""
 
     def __init__(self, test_result_json):
         self.extra_options = []
         # convert test result json/dict (from control server) to test result object instance