Bug 1548845 - [raptor] Fix local import of filter module. r=perftest-reviewers,rwood
☠ ☠ backed out by f72947acdfcd ☠ ☠
author: Henrik Skupin <mail@hskupin.info>
date: Fri, 17 May 2019 11:12:40 +0000
changeset 474314 a2544ca8c593cd50ac0753fdc2f799f39c0b057e
parent 474313 152615db9db653c273b5e90487d198ae2fd788a5
child 474315 609f489bdc8c0370d97b00dcc496454ee5d0296c
push id: 36027
push user: shindli@mozilla.com
push date: Fri, 17 May 2019 16:24:38 +0000
treeherder: mozilla-central@c94c54aff466
reviewers: perftest-reviewers, rwood
bugs: 1548845
milestone: 68.0a1
Bug 1548845 - [raptor] Fix local import of filter module. r=perftest-reviewers,rwood

To avoid colliding with the built-in "filter" function, the local filter module should be named "filters".

Differential Revision: https://phabricator.services.mozilla.com/D30532
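The collision referenced above: once a top-level module named "filter" is imported, that name shadows Python's built-in filter() throughout the importing file. A minimal, self-contained sketch of the failure mode (the stand-in module below is illustrative only, not the raptor code)::

    import sys
    import types

    # Stand-in for the old testing/raptor/raptor/filter.py; any module that
    # is importable under the name "filter" triggers the same problem.
    sys.modules['filter'] = types.ModuleType('filter')

    import filter  # "filter" is now bound to the module, not the built-in

    try:
        # intended use of the built-in: drop the sentinel -1 values
        filter(lambda x: x > 0, [12.0, -1, 15.0])
    except TypeError as err:
        print(err)  # 'module' object is not callable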
testing/raptor/raptor/filter.py
testing/raptor/raptor/filters.py
testing/raptor/raptor/output.py
rename from testing/raptor/raptor/filter.py
rename to testing/raptor/raptor/filters.py
--- a/testing/raptor/raptor/filter.py
+++ b/testing/raptor/raptor/filters.py
@@ -11,20 +11,20 @@ import math
 """
 data filters:
 takes a series of run data and applies statistical transforms to it
 
 Each filter is a simple function, but it also has attached a special
 `prepare` method that creates a tuple with one instance of a
 :class:`Filter`; this allows writing stuff like::
 
-  from raptor import filter
-  filters = filter.ignore_first.prepare(1) + filter.median.prepare()
+  from raptor import filters
+  filter_list = filters.ignore_first.prepare(1) + filters.median.prepare()
 
-  for filter in filters:
+  for filter in filter_list:
       data = filter(data)
   # data is filtered
 """
 
 _FILTERS = {}
 
 
 class Filter(object):
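The remainder of the Filter class is unchanged by this patch and not shown in the hunk above. For context, the prepare()-returns-a-tuple pattern the docstring describes can be sketched as follows; this is an assumed, simplified re-implementation for illustration, not the actual contents of filters.py (define_filter is a hypothetical helper; Filter, prepare, ignore_first and median are the real names)::

    class Filter(object):
        def __init__(self, func, *args):
            self.func = func
            self.args = args

        def __call__(self, data):
            return self.func(data, *self.args)

    def define_filter(func):
        # attach prepare(): wrap the function and its arguments in a
        # one-element tuple of Filter instances, so pipelines can be
        # composed with plain tuple addition
        func.prepare = lambda *args: (Filter(func, *args),)
        return func

    @define_filter
    def ignore_first(data, skip=1):
        return data[skip:]

    @define_filter
    def median(data):
        ordered = sorted(data)
        mid = len(ordered) // 2
        if len(ordered) % 2:
            return ordered[mid]
        return (ordered[mid - 1] + ordered[mid]) / 2.0

    # usage mirrors the docstring example above
    filter_list = ignore_first.prepare(1) + median.prepare()
    data = [30.0, 12.0, 14.0, 16.0]
    for f in filter_list:
        data = f(data)
    print(data)  # 14.0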
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -3,17 +3,17 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # some parts of this originally taken from /testing/talos/talos/output.py
 
 """output raptor test results"""
 from __future__ import absolute_import
 
-import filter
+import filters
 
 import json
 import os
 
 from mozlog import get_proxy_logger
 
 LOG = get_proxy_logger(component="raptor-output")
 
@@ -96,41 +96,41 @@ class Output(object):
                     new_subtest['alertThreshold'] = float(test.alert_threshold)
                     new_subtest['value'] = 0
                     new_subtest['unit'] = test.subtest_unit
 
                     if test.cold is False:
                         # for warm page-load, ignore first value due to 1st pageload noise
                         LOG.info("ignoring the first %s value due to initial pageload noise"
                                  % measurement_name)
-                        filtered_values = filter.ignore_first(new_subtest['replicates'], 1)
+                        filtered_values = filters.ignore_first(new_subtest['replicates'], 1)
                     else:
                         # for cold-load we want all the values
                         filtered_values = new_subtest['replicates']
 
                     # for pageload tests that measure TTFI: TTFI is not guaranteed to be available
                     # every time; the raptor measure.js webext will substitute a '-1' value in the
                     # cases where TTFI is not available, which is acceptable; however we don't want
                     # to include those '-1' TTFI values in our final results calculations
                     if measurement_name == "ttfi":
-                        filtered_values = filter.ignore_negative(filtered_values)
+                        filtered_values = filters.ignore_negative(filtered_values)
                         # we've already removed the first pageload value; if there aren't any more
                         # valid TTFI values available for this pageload just remove it from results
                         if len(filtered_values) < 1:
                             continue
 
                     # if 'alert_on' is set for this particular measurement, then we want to set the
                     # flag in the perfherder output to turn on alerting for this subtest
                     if self.subtest_alert_on is not None:
                         if measurement_name in self.subtest_alert_on:
                             LOG.info("turning on subtest alerting for measurement type: %s"
                                      % measurement_name)
                             new_subtest['shouldAlert'] = True
 
-                    new_subtest['value'] = filter.median(filtered_values)
+                    new_subtest['value'] = filters.median(filtered_values)
 
                     vals.append([new_subtest['value'], new_subtest['name']])
                     subtests.append(new_subtest)
 
             elif test.type == "benchmark":
                 if 'speedometer' in test.measurements:
                     subtests, vals = self.parseSpeedometerOutput(test)
                 elif 'motionmark' in test.measurements:
@@ -267,17 +267,17 @@ class Output(object):
                         combined_suites[next_suite['details']['name']]['subtests'] \
                             .append(next_subtest)
 
         # now we have a single entry for each test; with all replicates from all browser cycles
         for i, name in enumerate(combined_suites):
             vals = []
             for next_sub in combined_suites[name]['subtests']:
                 # calculate sub-test results (i.e. each measurement type)
-                next_sub['value'] = filter.median(next_sub['replicates'])
+                next_sub['value'] = filters.median(next_sub['replicates'])
                 # add to vals; vals is used to calculate overall suite result i.e. the
                 # geomean of all of the subtests / measurement types
                 vals.append([next_sub['value'], next_sub['name']])
 
             # calculate overall suite result ('value') which is geomean of all measures
             if len(combined_suites[name]['subtests']) > 1:
                 combined_suites[name]['value'] = self.construct_summary(vals, testname=name)
 
@@ -399,17 +399,17 @@ class Output(object):
                                       'replicates': []}
                 _subtests[sub]['replicates'].extend([round(x, 3) for x in replicates])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
         for name in names:
-            _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+            _subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
             vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
     def parseWASMMiscOutput(self, test):
         '''
           {u'wasm-misc': [
@@ -436,17 +436,17 @@ class Output(object):
                                       'replicates': []}
                 _subtests[sub]['replicates'].append(item['time'])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
         for name in names:
-            _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+            _subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
             vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
     def parseWASMGodotOutput(self, test):
         '''
             {u'wasm-godot': [
@@ -475,17 +475,17 @@ class Output(object):
                                       'replicates': []}
                 _subtests[sub]['replicates'].append(item['time'])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
         for name in names:
-            _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+            _subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
             vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
     def parseWebaudioOutput(self, test):
         # each benchmark 'index' becomes a subtest; each pagecycle / iteration
         # of the test has multiple values per index/subtest
@@ -522,17 +522,17 @@ class Output(object):
                                       'replicates': []}
                 _subtests[sub]['replicates'].extend([round(x, 3) for x in replicates])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
         for name in names:
-            _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+            _subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
             vals.append([_subtests[name]['value'], name])
 
         print subtests
         return subtests, vals
 
     def parseMotionmarkOutput(self, test):
         # for motionmark we want the frameLength:average value for each test
@@ -577,17 +577,17 @@ class Output(object):
                                       'replicates': []}
                 _subtests[sub]['replicates'].extend([replicate])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
         for name in names:
-            _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+            _subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
             vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
     def parseSunspiderOutput(self, test):
         _subtests = {}
         data = test.measurements['sunspider']
@@ -604,17 +604,17 @@ class Output(object):
                 _subtests[sub]['replicates'].extend([round(x, 3) for x in replicates])
 
         subtests = []
         vals = []
 
         names = _subtests.keys()
         names.sort(reverse=True)
         for name in names:
-            _subtests[name]['value'] = filter.mean(_subtests[name]['replicates'])
+            _subtests[name]['value'] = filters.mean(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
 
             vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
     def parseUnityWebGLOutput(self, test):
         """
@@ -649,17 +649,17 @@ class Output(object):
                                       'replicates': []}
                 _subtests[sub]['replicates'].append(item['result'])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
         for name in names:
-            _subtests[name]['value'] = filter.median(_subtests[name]['replicates'])
+            _subtests[name]['value'] = filters.median(_subtests[name]['replicates'])
             subtests.append(_subtests[name])
             vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
     def parseAssortedDomOutput(self, test):
         # each benchmark 'index' becomes a subtest; each pagecycle / iteration
         # of the test has multiple values
@@ -688,17 +688,17 @@ class Output(object):
                                        'replicates': []}
                 _subtests[_sub]['replicates'].extend([_value])
 
         vals = []
         subtests = []
         names = _subtests.keys()
         names.sort(reverse=True)
         for name in names:
-            _subtests[name]['value'] = round(filter.median(_subtests[name]['replicates']), 2)
+            _subtests[name]['value'] = round(filters.median(_subtests[name]['replicates']), 2)
             subtests.append(_subtests[name])
             # only use the 'total's to compute the overall result
             if name == 'total':
                 vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
     def summarize_screenshots(self, screenshots):
@@ -825,17 +825,17 @@ class Output(object):
             LOG.info("PERFHERDER_DATA: %s" % json.dumps(next_data_set))
             LOG.info("%s results can also be found locally at: %s" % (data_type, results_path))
 
         return True
 
     @classmethod
     def v8_Metric(cls, val_list):
         results = [i for i, j in val_list]
-        score = 100 * filter.geometric_mean(results)
+        score = 100 * filters.geometric_mean(results)
         return score
 
     @classmethod
     def JS_Metric(cls, val_list):
         """v8 benchmark score"""
         results = [i for i, j in val_list]
         return sum(results)
 
@@ -848,58 +848,58 @@ class Output(object):
         results = [i for i, j in val_list]
         # speedometer has 16 tests, each of these is made up of 9 subtests
         # and a sum of the 9 values.  We receive 160 values, and want to use
         # the 16 test values, not the sub test values.
         if len(results) != 160:
             raise Exception("Speedometer has 160 subtests, found: %s instead" % len(results))
 
         results = results[9::10]
-        score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
+        score = 60 * 1000 / filters.geometric_mean(results) / correctionFactor
         return score
 
     @classmethod
     def benchmark_score(cls, val_list):
         """
         benchmark_score: ares6/jetstream self reported as 'geomean'
         """
         results = [i for i, j in val_list if j == 'geomean']
-        return filter.mean(results)
+        return filters.mean(results)
 
     @classmethod
     def webaudio_score(cls, val_list):
         """
         webaudio_score: self reported as 'Geometric Mean'
         """
         results = [i for i, j in val_list if j == 'Geometric Mean']
-        return filter.mean(results)
+        return filters.mean(results)
 
     @classmethod
     def unity_webgl_score(cls, val_list):
         """
         unity_webgl_score: self reported as 'Geometric Mean'
         """
         results = [i for i, j in val_list if j == 'Geometric Mean']
-        return filter.mean(results)
+        return filters.mean(results)
 
     @classmethod
     def wasm_misc_score(cls, val_list):
         """
         wasm_misc_score: self reported as '__total__'
         """
         results = [i for i, j in val_list if j == '__total__']
-        return filter.mean(results)
+        return filters.mean(results)
 
     @classmethod
     def wasm_godot_score(cls, val_list):
         """
         wasm_godot_score: first-interactive mean
         """
         results = [i for i, j in val_list if j == 'first-interactive']
-        return filter.mean(results)
+        return filters.mean(results)
 
     @classmethod
     def stylebench_score(cls, val_list):
         """
         stylebench_score: https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888
         """
         correctionFactor = 3
         results = [i for i, j in val_list]
@@ -935,28 +935,28 @@ class Output(object):
         #     75 entries for test before the sum.
         #
         # We receive 76 entries per test, which adds up to 380. We want to use
         # the 5 test entries, not the rest.
         if len(results) != 380:
             raise Exception("StyleBench has 380 entries, found: %s instead" % len(results))
 
         results = results[75::76]
-        score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
+        score = 60 * 1000 / filters.geometric_mean(results) / correctionFactor
         return score
 
     @classmethod
     def sunspider_score(cls, val_list):
         results = [i for i, j in val_list]
         return sum(results)
 
     @classmethod
     def assorted_dom_score(cls, val_list):
         results = [i for i, j in val_list]
-        return round(filter.geometric_mean(results), 2)
+        return round(filters.geometric_mean(results), 2)
 
     @classmethod
     def supporting_data_total(cls, val_list):
         results = [i for i, j in val_list]
         return sum(results)
 
     def construct_summary(self, vals, testname):
         if testname.startswith('raptor-v8_7'):
@@ -979,11 +979,11 @@ class Output(object):
             return self.assorted_dom_score(vals)
         elif testname.startswith('raptor-wasm-misc'):
             return self.wasm_misc_score(vals)
         elif testname.startswith('raptor-wasm-godot'):
             return self.wasm_godot_score(vals)
         elif testname.startswith('supporting_data'):
             return self.supporting_data_total(vals)
         elif len(vals) > 1:
-            return round(filter.geometric_mean([i for i, j in vals]), 2)
+            return round(filters.geometric_mean([i for i, j in vals]), 2)
         else:
-            return round(filter.mean([i for i, j in vals]), 2)
+            return round(filters.mean([i for i, j in vals]), 2)