Bug 1486789 - support subtest units independent of benchmark units, set values for speedometer. r=ahal
author Joel Maher <jmaher@mozilla.com>
Wed, 29 Aug 2018 21:01:02 +0000
changeset 434017 872ea5bd5a172c4c0d8bb65f9b064c8e9568f144
parent 434016 24e01317f8c2ab542de4f63304b1a0d23c3dfbd5
child 434018 8897c80e6240025264a58cf1b3ff7cbdf97c6e76
push id 107229
push user aiakab@mozilla.com
push date Thu, 30 Aug 2018 04:38:54 +0000
treeherder mozilla-inbound@a278fa75aa70
perfherder [talos] [build metrics] [platform microbench]
reviewers ahal
bugs 1486789
milestone 63.0a1
Bug 1486789 - support subtest units independent of benchmark units, set values for speedometer. r=ahal

Allow .ini files to declare subtest units and lower_is_better values independently of the main test values; implement this for speedometer.

Differential Revision: https://phabricator.services.mozilla.com/D4465
testing/raptor/raptor/manifest.py
testing/raptor/raptor/output.py
testing/raptor/raptor/tests/raptor-speedometer.ini
testing/raptor/webext/raptor/runner.js
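
The heart of the change is a small defaulting rule in write_test_settings_json(): a test manifest may declare subtest_unit and subtest_lower_is_better, and when it does not, the subtest fields inherit the suite-level unit and lower_is_better. A minimal sketch of that rule, using the field names from this patch (the standalone helper is illustrative only, not part of the patch):

    def fill_subtest_fields(test_details, options):
        # Hypothetical helper mirroring the rule added to write_test_settings_json():
        # subtest fields fall back to the suite-level values when the .ini omits them.
        options['subtest_unit'] = test_details.get('subtest_unit', options['unit'])
        options['subtest_lower_is_better'] = test_details.get(
            'subtest_lower_is_better', options['lower_is_better'])
        return options
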
--- a/testing/raptor/raptor/manifest.py
+++ b/testing/raptor/raptor/manifest.py
@@ -84,16 +84,23 @@ def write_test_settings_json(test_detail
             test_settings['raptor-options']['measure']['hero'] = test_details['hero'].split()
     if test_details.get("page_timeout", None) is not None:
         test_settings['raptor-options']['page_timeout'] = int(test_details['page_timeout'])
     test_settings['raptor-options']['unit'] = test_details.get("unit", "ms")
     if test_details.get("lower_is_better", "true") == "false":
         test_settings['raptor-options']['lower_is_better'] = False
     else:
         test_settings['raptor-options']['lower_is_better'] = True
+
+    # support optional subtest unit/lower_is_better fields, default to main test values if not set
+    val = test_details.get('subtest_unit', test_settings['raptor-options']['unit'])
+    test_settings['raptor-options']['subtest_unit'] = val
+    val = test_details.get('subtest_lower_is_better', test_settings['raptor-options']['lower_is_better'])
+    test_settings['raptor-options']['subtest_lower_is_better'] = val
+
     if test_details.get("alert_threshold", None) is not None:
         test_settings['raptor-options']['alert_threshold'] = float(test_details['alert_threshold'])
 
     settings_file = os.path.join(tests_dir, test_details['name'] + '.json')
     try:
         with open(settings_file, 'w') as out_file:
             json.dump(test_settings, out_file, indent=4, ensure_ascii=False)
             out_file.close()
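
For speedometer, the per-test settings file written above would then carry both the suite-level and the subtest-level fields. Roughly, showing only the fields visible in this patch and in raptor-speedometer.ini (a sketch, not captured output):

    # Approximate 'raptor-options' content for the speedometer settings JSON
    raptor_options = {
        'unit': 'score',
        'lower_is_better': False,           # converted to a real boolean above
        'subtest_unit': 'ms',
        'subtest_lower_is_better': 'true',  # raw .ini string; runner.js normalizes
                                            # it with == "true" before reporting
        'page_timeout': 180000,
        'alert_threshold': 2.0,
    }
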
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -46,16 +46,17 @@ class Output(object):
             vals = []
             subtests = []
             suite = {
                 'name': test.name,
                 'type': test.type,
                 'extraOptions': test.extra_options,
                 'subtests': subtests,
                 'lowerIsBetter': test.lower_is_better,
+                'unit': test.unit,
                 'alertThreshold': float(test.alert_threshold)
             }
 
             suites.append(suite)
 
             # process results for pageloader type of tests
             if test.type == "pageload":
                 # each test can report multiple measurements per pageload
@@ -70,20 +71,20 @@ class Output(object):
                 # u'Firefox 62.0a1 20180528123052', u'lower_is_better': True, u'page':
                 # u'https://www.amazon.com/s/url=search-alias%3Daps&field-keywords=laptop',
                 # u'unit': u'ms', u'alert_threshold': 2}
 
                 for measurement_name, replicates in test.measurements.iteritems():
                     new_subtest = {}
                     new_subtest['name'] = test.name + "-" + measurement_name
                     new_subtest['replicates'] = replicates
-                    new_subtest['lowerIsBetter'] = test.lower_is_better
+                    new_subtest['lowerIsBetter'] = test.subtest_lower_is_better
                     new_subtest['alertThreshold'] = float(test.alert_threshold)
                     new_subtest['value'] = 0
-                    new_subtest['unit'] = test.unit
+                    new_subtest['unit'] = test.subtest_unit
 
                     filtered_values = filter.ignore_first(new_subtest['replicates'], 1)
                     new_subtest['value'] = filter.median(filtered_values)
 
                     vals.append([new_subtest['value'], new_subtest['name']])
                     subtests.append(new_subtest)
 
             elif test.type == "benchmark":
@@ -141,19 +142,19 @@ class Output(object):
 
         _subtests = {}
         data = test.measurements['speedometer']
         for page_cycle in data:
             for sub, replicates in page_cycle[0].iteritems():
                 # for each pagecycle, build a list of subtests and append all related replicates
                 if sub not in _subtests.keys():
                     # subtest not added yet, first pagecycle, so add new one
-                    _subtests[sub] = {'unit': test.unit,
+                    _subtests[sub] = {'unit': test.subtest_unit,
                                       'alertThreshold': float(test.alert_threshold),
-                                      'lowerIsBetter': test.lower_is_better,
+                                      'lowerIsBetter': test.subtest_lower_is_better,
                                       'name': sub,
                                       'replicates': []}
                 _subtests[sub]['replicates'].extend([round(x, 3) for x in replicates])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
@@ -178,19 +179,19 @@ class Output(object):
         _subtests = {}
         data = test.measurements['wasm-misc']
         for page_cycle in data:
             for item in page_cycle[0]:
                 # for each pagecycle, build a list of subtests and append all related replicates
                 sub = item['name']
                 if sub not in _subtests.keys():
                     # subtest not added yet, first pagecycle, so add new one
-                    _subtests[sub] = {'unit': test.unit,
+                    _subtests[sub] = {'unit': test.subtest_unit,
                                       'alertThreshold': float(test.alert_threshold),
-                                      'lowerIsBetter': test.lower_is_better,
+                                      'lowerIsBetter': test.subtest_lower_is_better,
                                       'name': sub,
                                       'replicates': []}
                 _subtests[sub]['replicates'].append(item['time'])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
@@ -225,19 +226,19 @@ class Output(object):
         for page_cycle in data:
             data = json.loads(page_cycle[0])
             for item in data:
                 # for each pagecycle, build a list of subtests and append all related replicates
                 sub = item['name']
                 replicates = [item['duration']]
                 if sub not in _subtests.keys():
                     # subtest not added yet, first pagecycle, so add new one
-                    _subtests[sub] = {'unit': test.unit,
+                    _subtests[sub] = {'unit': test.subtest_unit,
                                       'alertThreshold': float(test.alert_threshold),
-                                      'lowerIsBetter': test.lower_is_better,
+                                      'lowerIsBetter': test.subtest_lower_is_better,
                                       'name': sub,
                                       'replicates': []}
                 _subtests[sub]['replicates'].extend([round(x, 3) for x in replicates])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
@@ -280,19 +281,19 @@ class Output(object):
 
             # TODO: this assumes a single suite is run
             suite = page_cycle_results.keys()[0]
             for sub in page_cycle_results[suite].keys():
                 replicate = round(page_cycle_results[suite][sub]['frameLength']['average'], 3)
 
                 if sub not in _subtests.keys():
                     # subtest not added yet, first pagecycle, so add new one
-                    _subtests[sub] = {'unit': test.unit,
+                    _subtests[sub] = {'unit': test.subtest_unit,
                                       'alertThreshold': float(test.alert_threshold),
-                                      'lowerIsBetter': test.lower_is_better,
+                                      'lowerIsBetter': test.subtest_lower_is_better,
                                       'name': sub,
                                       'replicates': []}
                 _subtests[sub]['replicates'].extend([replicate])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
@@ -306,27 +307,27 @@ class Output(object):
     def parseSunspiderOutput(self, test):
         _subtests = {}
         data = test.measurements['sunspider']
         for page_cycle in data:
             for sub, replicates in page_cycle[0].iteritems():
                 # for each pagecycle, build a list of subtests and append all related replicates
                 if sub not in _subtests.keys():
                     # subtest not added yet, first pagecycle, so add new one
-                    _subtests[sub] = {'unit': test.unit,
+                    _subtests[sub] = {'unit': test.subtest_unit,
                                       'alertThreshold': float(test.alert_threshold),
-                                      'lowerIsBetter': test.lower_is_better,
+                                      'lowerIsBetter': test.subtest_lower_is_better,
                                       'name': sub,
                                       'replicates': []}
                 _subtests[sub]['replicates'].extend([round(x, 3) for x in replicates])
 
         total_subtest = {
-            'unit': test.unit,
+            'unit': test.subtest_unit,
             'alertThreshold': float(test.alert_threshold),
-            'lowerIsBetter': test.lower_is_better,
+            'lowerIsBetter': test.subtest_lower_is_better,
             'replicates': [],
             'name': 'benchmark_score',
             'value': 0
         }
         subtests = [total_subtest]
         vals = []
 
         names = _subtests.keys()
@@ -360,19 +361,19 @@ class Output(object):
         data = test.measurements['unity-webgl']
         for page_cycle in data:
             data = json.loads(page_cycle[0])
             for item in data:
                 # for each pagecycle, build a list of subtests and append all related replicates
                 sub = item['benchmark']
                 if sub not in _subtests.keys():
                     # subtest not added yet, first pagecycle, so add new one
-                    _subtests[sub] = {'unit': test.unit,
+                    _subtests[sub] = {'unit': test.subtest_unit,
                                       'alertThreshold': float(test.alert_threshold),
-                                      'lowerIsBetter': test.lower_is_better,
+                                      'lowerIsBetter': test.subtest_lower_is_better,
                                       'name': sub,
                                       'replicates': []}
                 _subtests[sub]['replicates'].append(item['result'])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
@@ -399,19 +400,19 @@ class Output(object):
 
         _subtests = {}
         data = test.measurements['assorted-dom']
         for pagecycle in data:
             for _sub, _value in pagecycle[0].iteritems():
                 # build a list of subtests and append all related replicates
                 if _sub not in _subtests.keys():
                     # subtest not added yet, first pagecycle, so add new one
-                    _subtests[_sub] = {'unit': test.unit,
+                    _subtests[_sub] = {'unit': test.subtest_unit,
                                        'alertThreshold': float(test.alert_threshold),
-                                       'lowerIsBetter': test.lower_is_better,
+                                       'lowerIsBetter': test.subtest_lower_is_better,
                                        'name': _sub,
                                        'replicates': []}
                 _subtests[_sub]['replicates'].extend([_value])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
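
With output.py reading the new fields, a benchmark suite can now report a suite-level score while its subtests stay in milliseconds. An illustrative sketch of one speedometer entry in the resulting PERFHERDER_DATA (subtest name and numbers are made up):

    suite = {
        'name': 'raptor-speedometer-firefox',
        'type': 'benchmark',
        'unit': 'score',                  # new suite-level unit field
        'lowerIsBetter': False,           # overall score: higher is better
        'alertThreshold': 2.0,
        'subtests': [{
            'name': 'EmberJS-TodoMVC',
            'unit': 'ms',                 # subtest_unit from the .ini
            'lowerIsBetter': True,        # subtest_lower_is_better from the .ini
            'alertThreshold': 2.0,
            'replicates': [112.4, 108.9, 110.7],
            'value': 110.7,               # summarized from the replicates
        }],
    }
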
--- a/testing/raptor/raptor/tests/raptor-speedometer.ini
+++ b/testing/raptor/raptor/tests/raptor-speedometer.ini
@@ -4,18 +4,20 @@
 
 # speedometer benchmark for firefox and chrome
 
 [DEFAULT]
 type =  benchmark
 test_url = http://localhost:<port>/Speedometer/index.html?raptor
 page_cycles = 5
 page_timeout = 180000
-unit = score
-lower_is_better = false
+unit = score
+subtest_unit = ms
+lower_is_better = false
+subtest_lower_is_better = true
 alert_threshold = 2.0
 
 [raptor-speedometer-firefox]
 apps = firefox
 
 [raptor-speedometer-chrome]
 apps = chrome
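
Note that everything read from these .ini manifests arrives as a plain string, which is why write_test_settings_json() compares values against "false"/"true" rather than treating them as booleans. A quick stdlib illustration of that typing (raptor itself loads manifests through manifestparser, not ConfigParser, so this only shows the string values):

    import ConfigParser  # Python 2, matching the iteritems() usage in output.py

    cp = ConfigParser.ConfigParser()
    cp.read('raptor-speedometer.ini')
    print(cp.get('DEFAULT', 'lower_is_better'))          # "false" (a string)
    print(cp.get('DEFAULT', 'subtest_lower_is_better'))  # "true"  (a string)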
 
--- a/testing/raptor/webext/raptor/runner.js
+++ b/testing/raptor/webext/raptor/runner.js
@@ -73,17 +73,19 @@ function getTestSettings() {
           // just replace the '<port>' keyword in the URL with actual benchmarkPort
           testURL = testURL.replace("<port>", benchmarkPort);
         }
 
         results.page = testURL;
         results.type = testType;
         results.name = testName;
         results.unit = settings.unit;
-        results.lower_is_better = settings.lower_is_better;
+        results.subtest_unit = settings.subtest_unit;
+        results.lower_is_better = settings.lower_is_better == "true";
+        results.subtest_lower_is_better = settings.subtest_lower_is_better == "true";
         results.alert_threshold = settings.alert_threshold;
 
         if (settings.page_timeout !== undefined) {
           pageTimeout = settings.page_timeout;
         }
         console.log("using page timeout (ms): " + pageTimeout);
 
         if (testType == "pageload") {