Bug 1186834 - fix flake8 issues on talos. r=jmaher, a=test-only
author: Julien Pagès <j.parkouss@gmail.com>
date: Mon, 23 Nov 2015 17:05:45 +0100
changeset: 305755 0409df514310c9be7092723c7d30b5d7a68acb99
parent: 305754 dc0ea062f5a7868aa6ec089d4474c2cac6de90b4
child: 305756 dba00f34c28d0f501d4c98fd9ca2850980d3fd47
push id: 1001
push user: raliiev@mozilla.com
push date: Mon, 18 Jan 2016 19:06:03 +0000
treeherder: mozilla-release@8b89261f3ac4
reviewers: jmaher
approval: test-only
bugs: 1186834
milestone: 44.0a2
testing/talos/talos/filter.py
testing/talos/talos/output.py
testing/talos/talos/results.py
testing/talos/talos/run_tests.py
testing/talos/talos/utils.py
testing/talos/talos/xtalos/etlparser.py
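
For reference, the classes of warnings fixed in this patch can be reproduced by running flake8 over the talos tree. F401 (unused import) and E501 (line longer than 79 characters) are the codes these hunks address; the paths and positions below are illustrative, not actual output captured from this revision:

$ pip install flake8
$ flake8 testing/talos/talos
testing/talos/talos/run_tests.py:8:1: F401 'mozfile' imported but unused
testing/talos/talos/utils.py:8:1: F401 'sys' imported but unused
testing/talos/talos/output.py:451:80: E501 line too long (84 > 79 characters)
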
--- a/testing/talos/talos/filter.py
+++ b/testing/talos/talos/filter.py
@@ -181,9 +181,8 @@ def v8_subtest(series, name):
                  'NavierStokes': 1484000.,
                  'RayTrace': 739989.,
                  'RegExp': 910985.,
                  'Richards': 35302.,
                  'Splay': 81491.
                  }
 
     return reference[name] / geometric_mean(series)
-
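
For context on the filter.py hunk: v8_subtest scores a series of replicates against a fixed per-benchmark reference value by dividing the reference by the geometric mean of the series. A minimal sketch, assuming geometric_mean is the usual log-space implementation from talos/filter.py and using made-up replicate values:

import math

def geometric_mean(series):
    # nth root of the product, computed in log space to avoid overflow
    return math.exp(sum(math.log(v) for v in series) / len(series))

# hypothetical replicates for the 'Richards' subtest (reference 35302.)
series = [34800.0, 35400.0, 35100.0]
print(35302. / geometric_mean(series))  # score relative to the reference
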
--- a/testing/talos/talos/output.py
+++ b/testing/talos/talos/output.py
@@ -444,17 +444,18 @@ class PerfherderOutput(Output):
                 suite = {
                     'name': test.name(),
                     'subtests': subtests,
                 }
                 suites.append(suite)
                 vals = []
                 replicates = {}
 
-                # TODO: counters!!!! we don't have any, but they suffer the same
+                # TODO: counters!!!! we don't have any, but they suffer the
+                # same
                 for result in test.results:
                     # XXX this will not work for manifests which list
                     # the same page name twice. It also ignores cycles
                     for page, val in result.raw_values():
                         if page == 'NULL':
                             page = test.name()
                             if tsresult is None:
                                 tsresult = r = TalosResults.Results()
@@ -479,26 +480,28 @@ class PerfherderOutput(Output):
                             page = test.name()
                         subtest = {
                             'name': page,
                             'value': val['filtered'],
                             'replicates': replicates[page],
                         }
                         subtests.append(subtest)
                         if test.test_config.get('lower_is_better') is not None:
-                            subtest['lowerIsBetter'] = test.test_config['lower_is_better']
+                            subtest['lowerIsBetter'] = \
+                                test.test_config['lower_is_better']
                         if test.test_config.get('unit'):
                             subtest['unit'] = test.test_config['unit']
 
                 # if there is more than one subtest, calculate a summary result
                 if len(subtests) > 1:
                     suite['value'] = self.construct_results(
                         vals, testname=test.name())
                 if test.test_config.get('lower_is_better') is not None:
-                    suite['lowerIsBetter'] = test.test_config['lower_is_better']
+                    suite['lowerIsBetter'] = \
+                        test.test_config['lower_is_better']
 
             # counters results_aux data
             counter_subtests = []
             for cd in test.all_counter_results:
                 for name, vals in cd.items():
                     # We want to add the xperf data as talos_counters
                     # exclude counters whose values are tuples (bad for
                     # graphserver)
@@ -519,14 +522,15 @@ class PerfherderOutput(Output):
                     if test.using_xperf:
                         subtest['value'] = vals[0]
                     else:
                         # calculate mean value
                         if len(vals) > 0:
                             varray = [float(v) for v in vals]
                             subtest['value'] = filter.mean(varray)
             if counter_subtests:
-                suites.append({'name': test.name(), 'subtests': counter_subtests})
+                suites.append({'name': test.name(),
+                               'subtests': counter_subtests})
         return test_results
 
 # available output formats
 formats = {'datazilla_urls': PerfherderOutput,
            'results_urls': GraphserverOutput}
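
The loops re-wrapped above assemble the suite/subtest dictionaries that PerfherderOutput emits. Roughly, a suite built by this code has the following shape; the values here are made up, and 'lowerIsBetter' and 'unit' appear only when the test config provides them:

suite = {
    'name': 'tp5o',                   # hypothetical test name
    'subtests': [
        {
            'name': 'example.com',    # page name from raw_values()
            'value': 312.5,           # filtered value for the page
            'replicates': [310.0, 315.0, 312.5],
            'lowerIsBetter': True,    # copied from test_config
            'unit': 'ms',
        },
    ],
    'value': 312.5,                   # summary, only when len(subtests) > 1
    'lowerIsBetter': True,
}
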
--- a/testing/talos/talos/results.py
+++ b/testing/talos/talos/results.py
@@ -162,17 +162,17 @@ class Results(object):
             for f in remaining_filters:
                 if f.func.__name__ == "v8_subtest":
                     # for v8_subtest we need the page for reference data
                     data = filter.v8_subtest(data, page)
                 else:
                     data = f.apply(data)
 
             summary = {
-                'filtered': data, # for backwards compatibility with perfherder
+                'filtered': data,  # backward compatibility with perfherder
                 'value': data
             }
 
             retval.append([summary, page])
 
         return retval
 
     def raw_values(self):
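
The results.py hunk sits inside the loop that runs each page's data through the configured filter chain; v8_subtest is special-cased because it needs the page name to look up its reference value, while every other filter is applied uniformly. A condensed sketch of that control flow:

import filter  # talos/filter.py, shadowing the builtin as the real code does

def apply_remaining_filters(remaining_filters, data, page):
    # mirror of the loop above: v8_subtest needs the page name,
    # all other filters just transform the data in order
    for f in remaining_filters:
        if f.func.__name__ == "v8_subtest":
            data = filter.v8_subtest(data, page)
        else:
            data = f.apply(data)
    return {'filtered': data, 'value': data}  # perfherder-compatible summary
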
--- a/testing/talos/talos/run_tests.py
+++ b/testing/talos/talos/run_tests.py
@@ -1,16 +1,15 @@
 #!/usr/bin/env python
 
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import mozversion
-import mozfile
 import logging
 import os
 import sys
 import time
 import traceback
 import urllib
 import utils
 import mozhttpd
@@ -190,17 +189,18 @@ def run_tests(config, browser_config):
         for test in tests:
             testname = test['name']
             testtimer = utils.Timer()
             logging.info("Starting test %s", testname)
 
             mytest = TTest()
             talos_results.add(mytest.runTest(browser_config, test))
 
-            logging.info("Completed test %s (%s)", testname, testtimer.elapsed())
+            logging.info("Completed test %s (%s)", testname,
+                         testtimer.elapsed())
 
     except TalosRegression:
         logging.error("Detected a regression for %s", testname)
         # by returning 1, we report an orange to buildbot
         # http://docs.buildbot.net/latest/developer/results.html
         return 1
     except (TalosCrash, TalosError):
         # NOTE: if we get into this condition, talos has an internal
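
Both styles of E501 fix used in this patch are visible in the hunks: implicit continuation inside an open call paren (the logging.info change here) and explicit backslash continuation where no bracket is open (the lowerIsBetter assignments in output.py). Excerpted from the patch side by side:

# implicit continuation: the open paren carries the statement over
logging.info("Completed test %s (%s)", testname,
             testtimer.elapsed())

# explicit backslash continuation for a long assignment
subtest['lowerIsBetter'] = \
    test.test_config['lower_is_better']
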
--- a/testing/talos/talos/utils.py
+++ b/testing/talos/talos/utils.py
@@ -1,16 +1,15 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 """Utility functions for Talos"""
 
 import os
-import sys
 import time
 import urlparse
 import string
 import urllib
 import logging
 import json
 import re
 import platform
--- a/testing/talos/talos/xtalos/etlparser.py
+++ b/testing/talos/talos/xtalos/etlparser.py
@@ -450,17 +450,18 @@ def etlparser(xperf_path, etl_filename, 
             print "TEST-UNEXPECTED-FAIL : xperf: %s" % error
 
         # We detect if browser_failures.txt exists to exit and turn the job
         # orange
         if error_filename:
             with open(error_filename, 'w') as errorFile:
                 errorFile.write('\n'.join(errors))
 
-# TODO: commented this out from bug 1205005, we have a memory error in blobber upload
+# TODO: commented this out from bug 1205005, we have a memory error in blobber
+# upload
 #        mud = os.environ.get('MOZ_UPLOAD_DIR', None)
 #        if mud:
 #            mud_filename = os.path.join(mud, etl_filename)
 #            os.rename(etl_filename, mud_filename)
 
 
 def etlparser_from_config(config_file, **kwargs):
     """start from a YAML config file"""