Bug 1593942 - Provide browser name and version being tested in perfherder data. r=perftest-reviewers,stephendonner
author Gregory Mierzwinski <gmierz2@outlook.com>
Mon, 11 Nov 2019 18:43:58 +0000
changeset 501790 80ecfe77072130f16fe643a588ae534c8819cc8a
parent 501789 291ef4ef1773d3a6cf138975185cdef75010e76b
child 501791 f30f256f9b67880a1d934d2e1a4d0fd012d06818
push id 100433
push user gmierz2@outlook.com
push date Wed, 13 Nov 2019 19:56:21 +0000
treeherder autoland@80ecfe770721
reviewers perftest-reviewers, stephendonner
bugs 1593942
milestone 72.0a1
Bug 1593942 - Provide browser name and version being tested in perfherder data. r=perftest-reviewers,stephendonner

This patch makes use of the new application section in the perfherder data to provide the browser name and version being tested.

Differential Revision: https://phabricator.services.mozilla.com/D52565
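In practice the change adds an 'application' block to the summarized perfherder results (and to each supporting-data blob, e.g. power measurements). A rough sketch of the shape, with illustrative values; the 'version' key is only present when a version could be detected:

    summarized_results['application'] = {
        'name': 'firefox',      # the application under test, taken from config['app']
        'version': '72.0a1',    # omitted when no browser version was detected
    }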
testing/raptor/raptor/output.py
testing/raptor/raptor/raptor.py
testing/raptor/raptor/results.py
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -28,21 +28,28 @@ class PerftestOutput(object):
         - results : list of RaptorTestResult instances
         """
         self.results = results
         self.summarized_results = {}
         self.supporting_data = supporting_data
         self.summarized_supporting_data = []
         self.summarized_screenshots = []
         self.subtest_alert_on = subtest_alert_on
+        self.browser_name = None
+        self.browser_version = None
 
     @abstractmethod
     def summarize(self, test_names):
         raise NotImplementedError()
 
+    def set_browser_meta(self, browser_name, browser_version):
+        # sets the browser metadata for the perfherder data
+        self.browser_name = browser_name
+        self.browser_version = browser_version
+
     def summarize_supporting_data(self):
         '''
         Supporting data was gathered outside of the main raptor test; it will be kept
         separate from the main raptor test results. Summarize it appropriately.
 
         supporting_data = {'type': 'data-type',
                            'test': 'raptor-test-ran-when-data-was-gathered',
                            'unit': 'unit that the values are in',
@@ -118,17 +125,24 @@ class PerftestOutput(object):
                     vals,
                     testname="supporting_data",
                     unit=data_set['unit']
                 )
 
         # split the supporting data by type, there will be one
         # perfherder output per type
         for data_type in support_data_by_type:
-            self.summarized_supporting_data.append(support_data_by_type[data_type])
+            data = support_data_by_type[data_type]
+            if self.browser_name:
+                data['application'] = {
+                    'name': self.browser_name
+                }
+                if self.browser_version:
+                    data['application']['version'] = self.browser_version
+            self.summarized_supporting_data.append(data)
 
         return
 
     def output(self, test_names):
         """output to file and perfherder data json"""
         if os.getenv('MOZ_UPLOAD_DIR'):
             # i.e. testing/mozharness/build/raptor.json locally; in production it will
             # be at /tasks/task_*/build/ (where it will be picked up by mozharness later
@@ -185,16 +199,23 @@ class PerftestOutput(object):
             LOG.info("gecko profiling enabled %s" % not_posting)
             output_perf_data = False
         elif test_type == 'scenario':
             # if a resource-usage flag was supplied the perfherder data
             # will still be output from output_supporting_data
             LOG.info("scenario test type was run %s" % not_posting)
             output_perf_data = False
 
+        if self.browser_name:
+            self.summarized_results['application'] = {
+                'name': self.browser_name
+            }
+            if self.browser_version:
+                self.summarized_results['application']['version'] = self.browser_version
+
         total_perfdata = 0
         if output_perf_data:
             # if we have supporting data i.e. power, we ONLY want those measurements
             # dumped out. TODO: Bug 1515406 - Add option to output both supplementary
             # data (i.e. power) and the regular Raptor test result
             # Both are already available as separate PERFHERDER_DATA json blobs
             if len(self.summarized_supporting_data) == 0:
                 LOG.info("PERFHERDER_DATA: %s" % json.dumps(self.summarized_results))
--- a/testing/raptor/raptor/raptor.py
+++ b/testing/raptor/raptor/raptor.py
@@ -147,16 +147,19 @@ either Raptor or browsertime."""
         self.device = None
         self.profile_class = profile_class or app
         self.firefox_android_apps = FIREFOX_ANDROID_APPS
         self.interrupt_handler = interrupt_handler
         self.results_handler = results_handler_class(**self.config)
 
         self.browser_name, self.browser_version = self.get_browser_meta()
 
+        browser_name, browser_version = self.get_browser_meta()
+        self.results_handler.add_browser_meta(self.config['app'], browser_version)
+
         # debug mode is currently only supported when running locally
         self.debug_mode = debug_mode if self.config['run_local'] else False
 
         # if running debug-mode reduce the pause after browser startup
         if self.debug_mode:
             self.post_startup_delay = min(self.post_startup_delay, 3000)
             LOG.info("debug-mode enabled, reducing post-browser startup pause to %d ms"
                      % self.post_startup_delay)
@@ -804,16 +807,18 @@ class Raptor(Perftest):
 
         # set up the results handler
         self.results_handler = RaptorResultsHandler(
             gecko_profile=self.config.get('gecko_profile'),
             power_test=self.config.get('power_test'),
             cpu_test=self.config.get('cpu_test'),
             memory_test=self.config.get('memory_test'),
         )
+        browser_name, browser_version = self.get_browser_meta()
+        self.results_handler.add_browser_meta(self.config['app'], browser_version)
 
         self.start_control_server()
 
     def run_test_setup(self, test):
         super(Raptor, self).run_test_setup(test)
 
         LOG.info("starting raptor test: %s" % test['name'])
         LOG.info("test settings: %s" % str(test))
--- a/testing/raptor/raptor/results.py
+++ b/testing/raptor/raptor/results.py
@@ -28,21 +28,28 @@ class PerftestResultsHandler(object):
         self.power_test = power_test
         self.cpu_test = cpu_test
         self.memory_test = memory_test
         self.app = app
         self.results = []
         self.page_timeout_list = []
         self.images = []
         self.supporting_data = None
+        self.browser_version = None
+        self.browser_name = None
 
     @abstractmethod
     def add(self, new_result_json):
         raise NotImplementedError()
 
+    def add_browser_meta(self, browser_name, browser_version):
+        # sets the browser metadata for the perfherder data
+        self.browser_name = browser_name
+        self.browser_version = browser_version
+
     def add_image(self, screenshot, test_name, page_cycle):
         # add to results
         LOG.info("received screenshot")
         self.images.append({'screenshot': screenshot,
                             'test_name': test_name,
                             'page_cycle': page_cycle})
 
     def add_page_timeout(self, test_name, page_url, pending_metrics):
@@ -167,16 +174,17 @@ class RaptorResultsHandler(PerftestResul
         LOG.info("received results in RaptorResultsHandler.add")
         new_result = RaptorTestResult(new_result_json)
         self.results.append(new_result)
 
     def summarize_and_output(self, test_config, tests, test_names):
         # summarize the result data, write to file and output PERFHERDER_DATA
         LOG.info("summarizing raptor test results")
         output = RaptorOutput(self.results, self.supporting_data, test_config['subtest_alert_on'])
+        output.set_browser_meta(self.browser_name, self.browser_version)
         output.summarize(test_names)
         # that has each browser cycle separate; need to check if there were multiple browser
         # cycles, and if so need to combine results from all cycles into one overall result
         output.combine_browser_cycles()
         output.summarize_screenshots(self.images)
 
         # only dump out supporting data (i.e. power) if actual Raptor test completed
         out_sup_perfdata = 0
@@ -558,17 +566,17 @@ class BrowsertimeResultsHandler(Perftest
                             break
                     else:
                         self.results.append(_new_benchmark_result(new_result))
 
         # now have all results gathered from all browsertime test URLs; format them for output
         output = BrowsertimeOutput(self.results,
                                    self.supporting_data,
                                    test_config['subtest_alert_on'])
-
+        output.set_browser_meta(self.browser_name, self.browser_version)
         output.summarize(test_names)
         success, out_perfdata = output.output(test_names)
 
         validate_success = True
         if not self.gecko_profile:
             validate_success = self._validate_treeherder_data(output, out_perfdata)
 
         # Dumping the video list for the visual metrics task.
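Taken together, the wiring introduced by this patch is roughly as follows (condensed from the hunks above, not verbatim):

    # Raptor/Perftest setup: detect the browser and hand the metadata to the results handler
    browser_name, browser_version = self.get_browser_meta()
    self.results_handler.add_browser_meta(self.config['app'], browser_version)

    # results handler, when summarizing: forward the metadata to the output object
    output.set_browser_meta(self.browser_name, self.browser_version)

    # PerftestOutput.output() / summarize_supporting_data(): attach the application block
    if self.browser_name:
        self.summarized_results['application'] = {'name': self.browser_name}
        if self.browser_version:
            self.summarized_results['application']['version'] = self.browser_version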