Bug 1504227 - Add raptor test option to specify what measurement(s) to alert on; r=davehunt
author      Rob Wood <rwood@mozilla.com>
date        Fri, 25 Jan 2019 18:01:45 +0000
changeset   515496 07500fee706b0073f2d36e416e9fdd407968f494
parent      515495 9a008f412fd77d52636b6430c07628bfec3b8252
child       515497 da2115631f1b4b216f69296761f0c9e2795aef8f
push id     1953
push user   ffxbld-merge
push date   Mon, 11 Mar 2019 12:10:20 +0000
treeherder  mozilla-release@9c35dcbaa899
reviewers   davehunt
bugs        1504227
milestone   66.0a1
Bug 1504227 - Add raptor test option to specify what measurement(s) to alert on; r=davehunt

Depends on D17288

Differential Revision: https://phabricator.services.mozilla.com/D17289
testing/raptor/raptor/manifest.py
testing/raptor/raptor/output.py
testing/raptor/raptor/raptor.py
testing/raptor/raptor/results.py
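
For context, a sketch of how the new setting looks from the test INI side. The section name and values below are hypothetical (real Raptor test INIs are read via manifestparser, not configparser); the list conversion mirrors what manifest.py now does:

import configparser
import textwrap

# Hypothetical pageload test INI using the new 'alert_on' option; its values
# must be a subset of the 'measure' setting or validation fails.
ini_text = textwrap.dedent("""\
    [raptor-tp6-example-firefox]
    type = pageload
    measure = fnbpaint, fcp, hero, loadtime
    alert_on = fcp, loadtime
""")

cfg = configparser.ConfigParser()
cfg.read_string(ini_text)
raw = cfg["raptor-tp6-example-firefox"]["alert_on"]

# manifest.py converts the comma-separated string to a stripped list:
alert_on = [item.strip() for item in raw.split(",")]
print(alert_on)  # ['fcp', 'loadtime']
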
--- a/testing/raptor/raptor/manifest.py
+++ b/testing/raptor/raptor/manifest.py
@@ -44,36 +44,40 @@ def validate_test_ini(test_details):
 
     for setting in required_settings:
         # measure setting not required for benchmark type tests
         if setting == 'measure' and test_details['type'] == 'benchmark':
             continue
         if setting not in test_details:
             valid_settings = False
             LOG.error("ERROR: setting '%s' is required but not found in %s"
-                     % (setting, test_details['manifest']))
+                      % (setting, test_details['manifest']))
 
     # if playback is specified, we need more playback settings
     if 'playback' in test_details:
         for setting in playback_settings:
             if setting not in test_details:
                 valid_settings = False
                 LOG.error("ERROR: setting '%s' is required but not found in %s"
-                         % (setting, test_details['manifest']))
+                          % (setting, test_details['manifest']))
 
     # if 'alert-on' is specified, we need to make sure that the value given is valid
     # i.e. any 'alert_on' values must be values that exist in the 'measure' ini setting
-    # i.e. 'alert_on = fcp, loadtime' must get rid of comma and space
     if 'alert_on' in test_details:
-        for alert_on_value in test_details['alert_on'].split(', '):
-            alert_on_value = alert_on_value.strip()
+
+        # support with or without spaces, i.e. 'measure = fcp, loadtime' or '= fcp,loadtime'
+        # convert to a list; and remove any spaces
+        test_details['alert_on'] = [_item.strip() for _item in test_details['alert_on'].split(',')]
+
+        # now make sure each alert_on value provided is valid
+        for alert_on_value in test_details['alert_on']:
             if alert_on_value not in test_details['measure']:
-                LOG.error("ERROR: The 'alert_on' value of '%s' is not valid because " \
+                LOG.error("ERROR: The 'alert_on' value of '%s' is not valid because "
                           "it doesn't exist in the 'measure' test setting!"
-                         % alert_on_value)
+                          % alert_on_value)
                 valid_settings = False
     return valid_settings
 
 
 def write_test_settings_json(args, test_details, oskey):
     # write test settings json file with test details that the control
     # server will provide for the web ext
     test_url = transform_platform(test_details['test_url'], oskey)
@@ -84,34 +88,32 @@ def write_test_settings_json(args, test_
             "test_url": test_url,
             "page_cycles": int(test_details['page_cycles']),
             "host": args.host,
         }
     }
 
     if test_details['type'] == "pageload":
         test_settings['raptor-options']['measure'] = {}
-        if "dcf" in test_details['measure']:
-            test_settings['raptor-options']['measure']['dcf'] = True
-        if "fnbpaint" in test_details['measure']:
-            test_settings['raptor-options']['measure']['fnbpaint'] = True
-        if "fcp" in test_details['measure']:
-            test_settings['raptor-options']['measure']['fcp'] = True
-        if "hero" in test_details['measure']:
-            test_settings['raptor-options']['measure']['hero'] = test_details['hero'].split(', ')
-        if "ttfi" in test_details['measure']:
-            test_settings['raptor-options']['measure']['ttfi'] = True
-        if "loadtime" in test_details['measure']:
-            test_settings['raptor-options']['measure']['loadtime'] = True
+
+        for m in [m.strip() for m in test_details['measure'].split(',')]:
+            test_settings['raptor-options']['measure'][m] = True
+            if m == 'hero':
+                test_settings['raptor-options']['measure'][m] = [h.strip() for h in
+                                                                 test_details['hero'].split(',')]
+
         if test_details.get("alert_on", None) is not None:
-            # i.e. 'alert_on = fcp, loadtime' must get rid of comma and space
-            test_settings['raptor-options']['alert_on'] = test_details['alert_on'].split(', ')
+            # alert_on was already converted to list above
+            test_settings['raptor-options']['alert_on'] = test_details['alert_on']
+
     if test_details.get("page_timeout", None) is not None:
         test_settings['raptor-options']['page_timeout'] = int(test_details['page_timeout'])
+
     test_settings['raptor-options']['unit'] = test_details.get("unit", "ms")
+
     if test_details.get("lower_is_better", "true") == "false":
         test_settings['raptor-options']['lower_is_better'] = False
     else:
         test_settings['raptor-options']['lower_is_better'] = True
 
     # support optional subtest unit/lower_is_better fields, default to main test values if not set
     val = test_details.get('subtest_unit', test_settings['raptor-options']['unit'])
     test_settings['raptor-options']['subtest_unit'] = val
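
A standalone sketch of the generic loop that replaced the per-measurement if-chain above. The function name and sample values are mine, not part of the patch; the behaviour mirrors write_test_settings_json(): every entry in 'measure' becomes a True flag, except 'hero', whose value is the stripped list of hero element names from the 'hero' setting:

def build_measure_options(test_details):
    """Build the 'measure' portion of raptor-options for a pageload test."""
    measure = {}
    for m in [m.strip() for m in test_details['measure'].split(',')]:
        measure[m] = True
        if m == 'hero':
            # hero is the one measurement whose value is a list, not a flag
            measure[m] = [h.strip() for h in test_details['hero'].split(',')]
    return measure


# 'hero1'/'hero2' are placeholder hero element names
details = {'measure': 'fnbpaint, fcp, hero, loadtime', 'hero': 'hero1, hero2'}
print(build_measure_options(details))
# {'fnbpaint': True, 'fcp': True, 'hero': ['hero1', 'hero2'], 'loadtime': True}
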
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -16,25 +16,26 @@ import os
 from mozlog import get_proxy_logger
 
 LOG = get_proxy_logger(component="raptor-output")
 
 
 class Output(object):
     """class for raptor output"""
 
-    def __init__(self, results, supporting_data):
+    def __init__(self, results, supporting_data, subtest_alert_on):
         """
         - results : list of RaptorTestResult instances
         """
         self.results = results
         self.summarized_results = {}
         self.supporting_data = supporting_data
         self.summarized_supporting_data = []
         self.summarized_screenshots = []
+        self.subtest_alert_on = subtest_alert_on
 
     def summarize(self):
         suites = []
         test_results = {
             'framework': {
                 'name': 'raptor',
             },
             'suites': suites,
@@ -95,16 +96,24 @@ class Output(object):
                     # to include those '-1' TTFI values in our final results calculations
                     if measurement_name == "ttfi":
                         filtered_values = filter.ignore_negative(filtered_values)
                         # we've already removed the first pageload value; if there aren't any more
                         # valid TTFI values available for this pageload just remove it from results
                         if len(filtered_values) < 1:
                             continue
 
+                    # if 'alert_on' is set for this particular measurement, then we want to set the
+                    # flag in the perfherder output to turn on alerting for this subtest
+                    if self.subtest_alert_on is not None:
+                        if measurement_name in self.subtest_alert_on:
+                            LOG.info("turning on subtest alerting for measurement type: %s"
+                                     % measurement_name)
+                            new_subtest['shouldAlert'] = True
+
                     new_subtest['value'] = filter.median(filtered_values)
 
                     vals.append([new_subtest['value'], new_subtest['name']])
                     subtests.append(new_subtest)
 
             elif test.type == "benchmark":
                 if 'speedometer' in test.measurements:
                     subtests, vals = self.parseSpeedometerOutput(test)
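
A minimal sketch of what the added check does to the summarized subtests. In the real code the flag is set while each new_subtest dict is still being built inside Output.summarize(); here it is applied after the fact, and only 'name'/'value' fields are shown:

def flag_alerting_subtests(subtests, subtest_alert_on):
    """Set 'shouldAlert' on each subtest whose measurement name is in alert_on."""
    if subtest_alert_on is None:
        return subtests
    for subtest in subtests:
        if subtest['name'] in subtest_alert_on:
            subtest['shouldAlert'] = True
    return subtests


subtests = [{'name': 'fcp', 'value': 123}, {'name': 'fnbpaint', 'value': 98}]
print(flag_alerting_subtests(subtests, ['fcp', 'loadtime']))
# [{'name': 'fcp', 'value': 123, 'shouldAlert': True}, {'name': 'fnbpaint', 'value': 98}]
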
--- a/testing/raptor/raptor/raptor.py
+++ b/testing/raptor/raptor/raptor.py
@@ -209,16 +209,21 @@ class Raptor(object):
         # add test specific preferences
         if test.get("preferences", None) is not None:
             if self.config['app'] == "firefox":
                 self.profile.set_preferences(json.loads(test['preferences']))
             else:
                 self.log.info("preferences were configured for the test, \
                               but we do not install them on non Firefox browsers.")
 
+        # if 'alert_on' was provided in the test INI, we must add that to our config
+        # for use in our results.py and output.py
+        # test['alert_on'] has already been converted to a list and stripped of spaces
+        self.config['subtest_alert_on'] = test.get('alert_on', None)
+
         # on firefox we can get an addon id; chrome addon actually is just cmd line arg
         if self.config['app'] in ['firefox', 'geckoview', 'fennec']:
             webext_id = self.profile.addons.addon_details(raptor_webext)['id']
 
         # for android/geckoview, create a top-level raptor folder on the device
         # sdcard; if it already exists remove it so we start fresh each time
         if self.config['app'] in ["geckoview", "fennec"]:
             self.device_raptor_dir = "/sdcard/raptor"
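
The value stored in self.config above is simply whatever manifest.py left in the test dict, so downstream consumers see either a stripped list or None. A tiny sketch of the two possible shapes ('example' is a placeholder test name):

# alert_on present in the test INI: already converted to a list by manifest.py
test = {'name': 'example', 'alert_on': ['fcp', 'loadtime']}
assert test.get('alert_on', None) == ['fcp', 'loadtime']

# alert_on omitted: the config entry is still created, but holds None
test = {'name': 'example'}
assert test.get('alert_on', None) is None
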
--- a/testing/raptor/raptor/results.py
+++ b/testing/raptor/raptor/results.py
@@ -64,17 +64,17 @@ class RaptorResultsHandler():
                  % supporting_data['type'])
         if self.supporting_data is None:
             self.supporting_data = []
         self.supporting_data.append(supporting_data)
 
     def summarize_and_output(self, test_config):
         # summarize the result data, write to file and output PERFHERDER_DATA
         LOG.info("summarizing raptor test results")
-        output = Output(self.results, self.supporting_data)
+        output = Output(self.results, self.supporting_data, test_config['subtest_alert_on'])
         output.summarize()
         output.summarize_screenshots(self.images)
         # only dump out supporting data (i.e. power) if actual Raptor test completed
         if self.supporting_data is not None and len(self.results) != 0:
             output.summarize_supporting_data()
             output.output_supporting_data()
         return output.output()
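
Because raptor.py sets 'subtest_alert_on' unconditionally (None when the INI has no alert_on), the direct key access above is safe. A self-contained sketch of the handoff, with a stub standing in for the real Output class (only the changed constructor signature is reproduced; results and supporting data are placeholders):

class Output(object):
    """Stub with the constructor signature from output.py above."""
    def __init__(self, results, supporting_data, subtest_alert_on):
        self.results = results
        self.supporting_data = supporting_data
        self.subtest_alert_on = subtest_alert_on


test_config = {'subtest_alert_on': ['fcp', 'loadtime']}  # populated in raptor.py
output = Output([], None, test_config['subtest_alert_on'])
assert output.subtest_alert_on == ['fcp', 'loadtime']
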