Bug 766598 - move responsiveness metric to filter.py. r=jmaher
author: malayaleecoder <malayaleecoder@gmail.com>
date: Thu, 26 May 2016 22:57:46 +0530
changeset: 340393 2870fc2fd1ce033c95e614ffc185625f56cf630e
parent: 340392 00fdf59663db3f4391d5d0aeecf90469ba56d1ed
child: 340394 867bca2fa2a37ab87fa46045a1970d9f6dc23939
push id: 1183
push user: raliiev@mozilla.com
push date: Mon, 05 Sep 2016 20:01:49 +0000
treeherder: mozilla-release@3148731bed45
reviewers: jmaher
bugs: 766598
milestone: 49.0a1
Bug 766598 - move responsiveness metric to filter.py. r=jmaher MozReview-Commit-ID: JDK96sRZEIg
testing/talos/talos/filter.py
testing/talos/talos/output.py
--- a/testing/talos/talos/filter.py
+++ b/testing/talos/talos/filter.py
@@ -181,8 +181,13 @@ def v8_subtest(series, name):
                  'NavierStokes': 1484000.,
                  'RayTrace': 739989.,
                  'RegExp': 910985.,
                  'Richards': 35302.,
                  'Splay': 81491.
                  }
 
     return reference[name] / geometric_mean(series)
+
+
+@define_filter
+def responsiveness_Metric(val_list):
+    return sum([float(x)*float(x) / 1000000.0 for x in val_list])
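
For illustration only (not part of the patch): a minimal sketch of how the relocated filter behaves once filter.py is imported. The import path and the sample delays below are assumptions, not taken from the changeset.

    from talos import filter

    delays = [20.0, 50.0, 100.0]  # hypothetical responsiveness samples, in ms
    score = filter.responsiveness_Metric(delays)
    # sum of squared delays scaled by 1e6: (400 + 2500 + 10000) / 1e6 = 0.0129
    print(score)
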
--- a/testing/talos/talos/output.py
+++ b/testing/talos/talos/output.py
@@ -89,20 +89,16 @@ class Output(object):
     @classmethod
     def isMemoryMetric(cls, resultName):
         """returns if the result is a memory metric"""
         memory_metric = ['memset', 'rss', 'pbytes', 'xres', 'modlistbytes',
                          'main_rss', 'content_rss']  # measured in bytes
         return bool([i for i in memory_metric if i in resultName])
 
     @classmethod
-    def responsiveness_Metric(cls, val_list):
-        return sum([float(x)*float(x) / 1000000.0 for x in val_list])
-
-    @classmethod
     def v8_Metric(cls, val_list):
         results = [i for i, j in val_list]
         score = 100 * filter.geometric_mean(results)
         return score
 
     @classmethod
     def JS_Metric(cls, val_list):
         """v8 benchmark score"""
@@ -140,17 +136,17 @@ class PerfherderOutput(Output):
                       sort_keys=True)
 
     def post(self, results, server, path, scheme, tbpl_output):
         """conform to current code- not needed for perfherder"""
         pass
 
     def construct_results(self, vals, testname):
         if 'responsiveness' in testname:
-            return self.responsiveness_Metric([val for (val, page) in vals])
+            return filter.responsiveness_Metric([val for (val, page) in vals])
         elif testname.startswith('v8_7'):
             return self.v8_Metric(vals)
         elif testname.startswith('kraken'):
             return self.JS_Metric(vals)
         elif testname.startswith('tcanvasmark'):
             return self.CanvasMark_Metric(vals)
         elif len(vals) > 1:
             return filter.geometric_mean([i for i, j in vals])
@@ -253,17 +249,17 @@ class PerfherderOutput(Output):
                     if 'mainthreadio' in name:
                         continue
 
                     # responsiveness has it's own metric, not the mean
                     # TODO: consider doing this for all counters
                     if 'responsiveness' is name:
                         subtest = {
                             'name': name,
-                            'value': self.responsiveness_Metric(vals)
+                            'value': filter.responsiveness_Metric(vals)
                         }
                         counter_subtests.append(subtest)
                         continue
 
                     subtest = {
                         'name': name,
                         'value': 0.0,
                     }
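
Also for illustration only: with the change applied, a 'responsiveness' counter subtest is built from the filter module rather than the removed classmethod. A hedged sketch, with hypothetical counter samples:

    from talos import filter

    vals = [20.0, 50.0, 100.0]  # hypothetical responsiveness counter values (ms)
    subtest = {
        'name': 'responsiveness',
        'value': filter.responsiveness_Metric(vals),  # 0.0129 for these samples
    }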