Bug 1473365 - migrate AWFY assortedDOM tests to a raptor benchmark r=jmaher
author Rob Wood <rwood@mozilla.com>
Mon, 13 Aug 2018 21:30:33 +0000
changeset 431396 a2876bd195728a0f96aa432e795f09c2a6a62c1e
parent 431395 c8c488989a62c9ad8805569062c89ff74e641418
child 431397 8fdd88cea1887c403c09a486cff98313bd191f13
push id 67761
push user rwood@mozilla.com
push date Tue, 14 Aug 2018 14:44:49 +0000
treeherder autoland@a2876bd19572
reviewers jmaher
bugs 1473365
milestone 63.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1473365 - migrate AWFY assortedDOM tests to a raptor benchmark r=jmaher

Differential Revision: https://phabricator.services.mozilla.com/D3046
taskcluster/ci/test/raptor.yml
taskcluster/ci/test/test-sets.yml
testing/raptor/mach_commands.py
testing/raptor/raptor/output.py
testing/raptor/raptor/raptor.ini
testing/raptor/raptor/tests/raptor-assorted-dom.ini
testing/raptor/webext/raptor/manifest.json
--- a/taskcluster/ci/test/raptor.yml
+++ b/taskcluster/ci/test/raptor.yml
@@ -216,8 +216,37 @@ raptor-unity-webgl-chrome:
     max-run-time: 3600
     mozharness:
         extra-options:
             - --test=raptor-unity-webgl
             - --app=chrome
     fetches:
         fetch:
             - unity-webgl
+
+raptor-assorted-dom-firefox:
+    description: "Raptor Assorted-Dom on Firefox"
+    try-name: raptor-assorted-dom-firefox
+    treeherder-symbol: Rap(dom)
+    run-on-projects: ['try', 'mozilla-central']
+    tier: 3
+    max-run-time: 1500
+    mozharness:
+        extra-options:
+            - --test=raptor-assorted-dom
+    fetches:
+        fetch:
+            - assorted-dom
+
+raptor-assorted-dom-chrome:
+    description: "Raptor Assorted-Dom on Chrome"
+    try-name: raptor-assorted-dom-chrome
+    treeherder-symbol: Rap-C(dom)
+    run-on-projects: ['try', 'mozilla-central']
+    tier: 3
+    max-run-time: 1500
+    mozharness:
+        extra-options:
+            - --test=raptor-assorted-dom
+            - --app=chrome
+    fetches:
+        fetch:
+            - assorted-dom
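
The two new task definitions are deliberately symmetric: apart from the target application and treeherder symbol, both run the same raptor test and fetch the same assorted-dom artifact. A minimal sanity sketch of that symmetry (PyYAML here is only a stand-in assumption; the real taskgraph uses its own in-tree loader and schema validation):

    import yaml  # assumption: PyYAML; taskgraph does not actually use it

    with open('taskcluster/ci/test/raptor.yml') as f:
        tasks = yaml.safe_load(f)

    for name in ('raptor-assorted-dom-firefox', 'raptor-assorted-dom-chrome'):
        task = tasks[name]
        # both variants schedule the same raptor test...
        assert '--test=raptor-assorted-dom' in task['mozharness']['extra-options']
        # ...and depend on the same fetched benchmark artifact
        assert 'assorted-dom' in task['fetches']['fetch']
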
--- a/taskcluster/ci/test/test-sets.yml
+++ b/taskcluster/ci/test/test-sets.yml
@@ -85,26 +85,28 @@ raptor-firefox:
     - raptor-tp6-firefox
     - raptor-speedometer-firefox
     - raptor-stylebench-firefox
     - raptor-motionmark-htmlsuite-firefox
     - raptor-motionmark-animometer-firefox
     - raptor-webaudio-firefox
     - raptor-gdocs-firefox
     - raptor-sunspider-firefox
+    - raptor-assorted-dom-firefox
 
 raptor-chrome:
     - raptor-tp6-chrome
     - raptor-speedometer-chrome
     - raptor-stylebench-chrome
     - raptor-motionmark-htmlsuite-chrome
     - raptor-motionmark-animometer-chrome
     - raptor-webaudio-chrome
     - raptor-gdocs-chrome
     - raptor-sunspider-chrome
+    - raptor-assorted-dom-chrome
 
 # Fetch tasks are only supported on Linux for now,
 # so these need to be separate sets.
 raptor-fetch-firefox:
     - raptor-unity-webgl-firefox
 
 raptor-fetch-chrome:
     - raptor-unity-webgl-chrome
--- a/testing/raptor/mach_commands.py
+++ b/testing/raptor/mach_commands.py
@@ -17,17 +17,17 @@ import subprocess
 from mach.decorators import CommandProvider, Command
 from mozboot.util import get_state_dir
 from mozbuild.base import MozbuildObject, MachCommandBase
 from mozpack.copier import FileCopier
 from mozpack.manifests import InstallManifest
 
 HERE = os.path.dirname(os.path.realpath(__file__))
 BENCHMARK_REPOSITORY = 'https://github.com/mozilla/perf-automation'
-BENCHMARK_REVISION = '6beb3d3e22abce8cf8e2e89bc45acd4152258f12'
+BENCHMARK_REVISION = '4befd28725c687b91ce749420eab29352ecbcab4'
 
 
 class RaptorRunner(MozbuildObject):
     def run_test(self, raptor_args):
         """
         We want to do a couple of things before running raptor
         1. Clone mozharness
         2. Make config for raptor mozharness
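
Bumping BENCHMARK_REVISION pins local runs to the perf-automation snapshot that now contains the assorted-dom benchmark. A minimal sketch of what that pin implies, with a hypothetical fetch_benchmarks helper (the in-tree RaptorRunner does its own clone/checkout bookkeeping):

    import subprocess

    def fetch_benchmarks(dest):
        # clone the shared benchmark repository, then pin the checkout to
        # the exact revision this version of raptor expects
        subprocess.check_call(['git', 'clone', BENCHMARK_REPOSITORY, dest])
        subprocess.check_call(['git', 'checkout', BENCHMARK_REVISION], cwd=dest)
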
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -92,16 +92,18 @@ class Output(object):
                 elif 'motionmark' in test.measurements:
                     subtests, vals = self.parseMotionmarkOutput(test)
                 elif 'sunspider' in test.measurements:
                     subtests, vals = self.parseSunspiderOutput(test)
                 elif 'webaudio' in test.measurements:
                     subtests, vals = self.parseWebaudioOutput(test)
                 elif 'unity-webgl' in test.measurements:
                     subtests, vals = self.parseUnityWebGLOutput(test)
+                elif 'assorted-dom' in test.measurements:
+                    subtests, vals = self.parseAssortedDomOutput(test)
                 suite['subtests'] = subtests
 
             else:
                 LOG.error("output.summarize received unsupported test results type")
                 return
 
             # for pageload tests, if there are > 1 subtests here, that means there
             # were multiple measurements captured in each single pageload; we want
@@ -337,16 +339,57 @@ class Output(object):
         names.sort(reverse=True)
         for name in names:
             _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
             vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
+    def parseAssortedDomOutput(self, test):
+        # each benchmark 'index' becomes a subtest; each pagecycle / iteration
+        # of the test has multiple values
+
+        # this is the format we receive the results in from the benchmark
+        # i.e. this is ONE pagecycle of assorted-dom (note: 'test' itself is a valid subtest name):
+
+        # {u'worker-getname-performance-getter': 5.9, u'window-getname-performance-getter': 6.1,
+        # u'window-getprop-performance-getter': 6.1, u'worker-getprop-performance-getter': 6.1,
+        # u'test': 5.8, u'total': 30}
+
+        # the 'total' is provided for us by the benchmark; the overall suite score
+        # is derived from the 'total' values (their median across pagecycles, fed
+        # through assorted_dom_score below); keep all subtest values for the logs/json
+
+        _subtests = {}
+        data = test.measurements['assorted-dom']
+        for pagecycle in data:
+            for _sub, _value in pagecycle[0].iteritems():
+                # build a list of subtests and append all related replicates
+                if _sub not in _subtests:
+                    # subtest not added yet, first pagecycle, so add new one
+                    _subtests[_sub] = {'unit': test.unit,
+                                       'alertThreshold': float(test.alert_threshold),
+                                       'lowerIsBetter': test.lower_is_better,
+                                       'name': _sub,
+                                       'replicates': []}
+                _subtests[_sub]['replicates'].append(_value)
+
+        vals = []
+        subtests = []
+        names = _subtests.keys()
+        names.sort(reverse=True)
+        for name in names:
+            _subtests[name]['value'] = round(filter.median(_subtests[name]['replicates']), 2)
+            subtests.append(_subtests[name])
+            # only use the 'total's to compute the overall result
+            if name == 'total':
+                vals.append([_subtests[name]['value'], name])
+
+        return subtests, vals
+
     def output(self):
         """output to file and perfherder data json """
         if self.summarized_results == {}:
             LOG.error("error: no summarized raptor results found!")
             return False
 
         if os.environ['MOZ_UPLOAD_DIR']:
             # i.e. testing/mozharness/build/raptor.json locally; in production it will
@@ -472,16 +515,21 @@ class Output(object):
         score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
         return score
 
     @classmethod
     def sunspider_score(cls, val_list):
         results = [i for i, j in val_list]
         return sum(results)
 
+    @classmethod
+    def assorted_dom_score(cls, val_list):
+        results = [i for i, j in val_list]
+        return round(filter.geometric_mean(results), 2)
+
     def construct_summary(self, vals, testname):
         if testname.startswith('raptor-v8_7'):
             return self.v8_Metric(vals)
         elif testname.startswith('raptor-kraken'):
             return self.JS_Metric(vals)
         elif testname.startswith('raptor-jetstream'):
             return self.benchmark_score(vals)
         elif testname.startswith('raptor-speedometer'):
@@ -489,12 +537,14 @@ class Output(object):
         elif testname.startswith('raptor-stylebench'):
             return self.stylebench_score(vals)
         elif testname.startswith('raptor-sunspider'):
             return self.sunspider_score(vals)
         elif testname.startswith('raptor-unity-webgl'):
             return self.unity_webgl_score(vals)
         elif testname.startswith('raptor-webaudio'):
             return self.webaudio_score(vals)
+        elif testname.startswith('raptor-assorted-dom'):
+            return self.assorted_dom_score(vals)
         elif len(vals) > 1:
             return round(filter.geometric_mean([i for i, j in vals]), 2)
         else:
             return round(filter.mean([i for i, j in vals]), 2)
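
Taken together, the two new pieces above work like this: parseAssortedDomOutput reduces each subtest to the median of its replicates and forwards only the 'total' entries, and assorted_dom_score takes the geometric mean of those entries (with a single 'total' entry, the geometric mean is just that value). A standalone sketch on two hypothetical pagecycles shaped like the sample quoted in the comments (statistics and math stand in for the in-tree filter module):

    from math import exp, log
    from statistics import median  # stand-in for filter.median

    # two hypothetical pagecycles, in the shape the benchmark reports
    pagecycles = [
        [{'window-getname-performance-getter': 6.1, 'test': 5.8, 'total': 30}],
        [{'window-getname-performance-getter': 6.3, 'test': 5.9, 'total': 32}],
    ]

    replicates = {}
    for pagecycle in pagecycles:
        for name, value in pagecycle[0].items():
            replicates.setdefault(name, []).append(value)

    # each subtest value is the median of its replicates; only 'total'
    # feeds the suite-level score
    totals = [round(median(replicates['total']), 2)]  # -> [31.0]

    # geometric mean, as in assorted_dom_score
    score = round(exp(sum(log(t) for t in totals) / len(totals)), 2)
    print(score)  # -> 31.0
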
--- a/testing/raptor/raptor/raptor.ini
+++ b/testing/raptor/raptor/raptor.ini
@@ -3,8 +3,9 @@
 [include:tests/raptor-speedometer.ini]
 [include:tests/raptor-stylebench.ini]
 [include:tests/raptor-sunspider.ini]
 [include:tests/raptor-motionmark-htmlsuite.ini]
 [include:tests/raptor-motionmark-animometer.ini]
 [include:tests/raptor-unity-webgl.ini]
 [include:tests/raptor-webaudio.ini]
 [include:tests/raptor-gdocs.ini]
+[include:tests/raptor-assorted-dom.ini]
new file mode 100644
--- /dev/null
+++ b/testing/raptor/raptor/tests/raptor-assorted-dom.ini
@@ -0,0 +1,14 @@
+[DEFAULT]
+type = benchmark
+test_url = http://localhost:<port>/assorted-dom/assorted/driver.html?raptor
+page_cycles = 10
+page_timeout = 60000
+unit = ms
+lower_is_better = true
+alert_threshold = 2.0
+
+[raptor-assorted-dom-firefox]
+apps = firefox
+
+[raptor-assorted-dom-chrome]
+apps = chrome
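
Everything under [DEFAULT] fans out to both app-specific sections, so the Firefox and Chrome variants share page cycles, timeout, units, and alerting configuration. A quick illustration outside the harness (the harness itself reads this manifest via manifestparser; ConfigParser here is only an assumption for demonstration):

    try:
        from configparser import ConfigParser   # Python 3
    except ImportError:
        from ConfigParser import ConfigParser   # Python 2, as the tree used then

    cp = ConfigParser()
    cp.read('testing/raptor/raptor/tests/raptor-assorted-dom.ini')
    # [DEFAULT] keys are visible through every named section
    print(cp.get('raptor-assorted-dom-firefox', 'page_cycles'))     # -> 10
    print(cp.get('raptor-assorted-dom-chrome', 'alert_threshold'))  # -> 2.0
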
--- a/testing/raptor/webext/raptor/manifest.json
+++ b/testing/raptor/webext/raptor/manifest.json
@@ -20,17 +20,18 @@
       "js": ["measure.js"]
     },
     {
       "matches": ["*://*/Speedometer/index.html*",
                   "*://*/StyleBench/*",
                   "*://*/MotionMark/*",
                   "*://*/SunSpider/*",
                   "*://*/webaudio/*",
-                  "*://*/unity-webgl/index.html*"],
+                  "*://*/unity-webgl/index.html*",
+                  "*://*/assorted-dom/assorted/results.html*"],
       "js": ["benchmark-relay.js"]
     }
   ],
   "permissions": [
     "<all_urls>",
     "tabs",
     "storage",
     "alarms"