Bug 1196415 - cleanup the useless log messages in talos. r=jmaher
author: Julien Pagès <j.parkouss@gmail.com>
Wed, 02 Sep 2015 21:28:17 +0200
changeset 1006 18be0ccfa48d
parent 1005 be34538a5581
child 1007 cbea3dea9d0c
push id: 695
push user: j.parkouss@gmail.com
push date: Thu, 03 Sep 2015 07:45:10 +0000
reviewers: jmaher
bugs: 1196415
Bug 1196415 - cleanup the useless log messages in talos. r=jmaher
talos/ffsetup.py
talos/output.py
talos/results.py
talos/run_tests.py
talos/talos_process.py
talos/ttest.py
talos/utils.py
--- a/talos/ffsetup.py
+++ b/talos/ffsetup.py
@@ -104,17 +104,22 @@ class FFSetup(object):
 
     def _run_profile(self):
         command_args = utils.GenerateBrowserCommandLine(
             self.browser_config["browser_path"],
             self.browser_config["extra_args"],
             self.profile_dir,
             self.browser_config["init_url"]
         )
-        browser = ProcessHandler(command_args, env=self.env)
+
+        def browser_log(line):
+            logging.debug('BROWSER_OUTPUT: %s', line)
+
+        browser = ProcessHandler(command_args, env=self.env,
+                                 processOutputLine=browser_log)
         browser.run()
         try:
             browser.wait()
         except KeyboardInterrupt:
             browser.kill()
             raise
 
         results_raw = '\n'.join(browser.output)
@@ -137,20 +142,23 @@ class FFSetup(object):
             self.sps_profile.update_env(self.env)
 
     def clean(self):
         mozfile.remove(self._tmp_dir)
         if self.sps_profile:
             self.sps_profile.clean()
 
     def __enter__(self):
+        logging.info('Initialising browser for %s test...',
+                     self.test_config['name'])
         self._init_env()
         self._init_profile()
         try:
             self._run_profile()
         except:
             self.clean()
             raise
         self._init_sps_profile()
+        logging.info('Browser initialized.')
         return self
 
     def __exit__(self, type, value, tb):
         self.clean()
--- a/talos/output.py
+++ b/talos/output.py
@@ -156,33 +156,30 @@ class GraphserverOutput(Output):
             testname = test.name()
             if test.format == 'tpformat':
                 # for some reason, we append the test extension to tp results
                 # but not ts
                 # http://hg.mozilla.org/build/talos/file/170c100911b6/talos
                 # /run_tests.py#l176
                 testname += test.extension()
 
-            utils.stamped_msg("Generating results file: %s" % test.name(),
-                              "Started")
+            logging.debug("Generating results file: %s" % test.name())
 
             # HACK: when running xperf, we upload xperf counters to the graph
             # server but we do not want to
             # upload the test results as they will confuse the graph server
             if not (test.format == 'tpformat' and test.using_xperf):
                 vals = []
                 for result in test.results:
                     filtered_val = result.values(testname,
                                                  test.test_config['filters'])
                     vals.extend([[i['filtered'], j] for i, j in filtered_val])
                 result_strings.append(self.construct_results(vals,
                                                              testname=testname,
                                                              **info_dict))
-                utils.stamped_msg("Generating results file: %s" % test.name(),
-                                  "Stopped")
 
             # counter results
             for cd in test.all_counter_results:
                 for counter_type, values in cd.items():
                     # get the counter name
                     counterName = '%s_%s' % (test.name(),
                                              self.shortName(counter_type))
                     if not values:
--- a/talos/results.py
+++ b/talos/results.py
@@ -8,17 +8,16 @@
 objects and methods for parsing and serializing Talos results
 see https://wiki.mozilla.org/Buildbot/Talos/DataFormat
 """
 
 import json
 import os
 import re
 import csv
-import logging
 from talos import output, utils, filter
 
 
 class TalosResults(object):
     """Container class for Talos results"""
 
     def __init__(self, title, date, browser_config):
         self.results = []
@@ -55,17 +54,16 @@ class TalosResults(object):
         return [i for i in formats if i not in output.formats]
 
     def output(self, output_formats):
         """
         output all results to appropriate URLs
         - output_formats: a dict mapping formats to a list of URLs
         """
 
-        logging.info("Outputting talos results => %s", output_formats)
         tbpl_output = {}
         try:
 
             for key, urls in output_formats.items():
                 _output = output.formats[key](self)
                 results = _output()
                 for url in urls:
                     _output.output(results, url, tbpl_output)
@@ -79,17 +77,18 @@ class TalosResults(object):
                     'file://%s' % os.path.join(os.getcwd(), 'results.out'),
                     results
                 )
             except:
                 pass
             print '\nFAIL: %s' % str(e).replace('\n', '\nRETURN:')
             raise e
 
-        print "TinderboxPrint: TalosResult: %s" % json.dumps(tbpl_output)
+        if tbpl_output:
+            print "TinderboxPrint: TalosResult: %s" % json.dumps(tbpl_output)
 
 
 class TestResults(object):
     """container object for all test results across cycles"""
 
     def __init__(self, test_config, global_counters=None, extensions=None):
         self.results = []
         self.test_config = test_config
--- a/talos/run_tests.py
+++ b/talos/run_tests.py
@@ -210,67 +210,63 @@ def run_tests(config, browser_config):
     httpd = None
     if browser_config['develop']:
         httpd = setup_webserver(browser_config['webserver'])
         if httpd:
             httpd.start()
 
     # run the tests
     timer = utils.Timer()
-    utils.stamped_msg(title, "Started")
+    logging.info("Starting test suite %s", title)
     for test in tests:
         testname = test['name']
-        utils.stamped_msg("Running test " + testname, "Started")
+        testtimer = utils.Timer()
+        logging.info("Starting test %s", testname)
 
         mozfile.remove('logcat.log')
 
         try:
             mytest = TTest()
             if mytest:
                 talos_results.add(mytest.runTest(browser_config, test))
             else:
-                utils.stamped_msg("Error found while running %s" % testname,
-                                  "Error")
+                logging.error("Error found while running %s", testname)
         except TalosRegression:
-            utils.stamped_msg("Detected a regression for " + testname,
-                              "Stopped")
+            logging.error("Detected a regression for %s", testname)
             print_logcat()
             if httpd:
                 httpd.stop()
             # by returning 1, we report an orange to buildbot
             # http://docs.buildbot.net/latest/developer/results.html
             return 1
         except (TalosCrash, TalosError):
             # NOTE: if we get into this condition, talos has an internal
             # problem and cannot continue
             #       this will prevent future tests from running
-            utils.stamped_msg("Failed %s" % testname, "Stopped")
-            TalosError_tb = sys.exc_info()
-            traceback.print_exception(*TalosError_tb)
+            traceback.print_exception(*sys.exc_info())
             print_logcat()
             if httpd:
                 httpd.stop()
             # indicate a failure to buildbot, turn the job red
             return 2
 
-        utils.stamped_msg("Completed test " + testname, "Stopped")
+        logging.info("Completed test %s (%s)", testname, testtimer.elapsed())
         print_logcat()
 
-    elapsed = timer.elapsed()
-    print "cycle time: " + elapsed
-    utils.stamped_msg(title, "Stopped")
+    logging.info("Completed test suite (%s)", timer.elapsed())
 
     # stop the webserver if running
     if httpd:
         httpd.stop()
 
     # output results
     if results_urls:
         talos_results.output(results_urls)
         if browser_config['develop']:
+            print
             print ("Thanks for running Talos locally. Results are in"
                    " %s and %s" % (results_urls['results_urls'],
                                    results_urls['datazilla_urls']))
 
     # we will stop running tests on a failed test, or we will return 0 for
     # green
     return 0
 
--- a/talos/talos_process.py
+++ b/talos/talos_process.py
@@ -18,17 +18,17 @@ class Reader(object):
 
     def __call__(self, line):
         if line.find('__endTimestamp') != -1:
             self.got_end_timestamp = True
             self.event.set()
 
         if not (line.startswith('JavaScript error:') or
                 line.startswith('JavaScript warning:')):
-            print line
+            logging.debug('BROWSER_OUTPUT: %s', line)
             self.output.append(line)
 
 
 def run_browser(command, timeout=None, on_started=None, **kwargs):
     """
     Run the browser using the given `command`.
 
     After the browser prints __endTimestamp, we give it 5
--- a/talos/ttest.py
+++ b/talos/ttest.py
@@ -115,18 +115,16 @@ class TTest(object):
                 setup.env['MOZ_MAIN_THREAD_IO_LOG'] = mainthread_io
 
             test_config['url'] = utils.interpolate(
                 test_config['url'],
                 profile=setup.profile_dir,
                 firefox=browser_config['browser_path']
             )
 
-            logging.debug("initialized %s", browser_config['process'])
-
             # setup global (cross-cycle) counters:
             # shutdown, responsiveness
             global_counters = {}
             if browser_config.get('xperf_path'):
                 for c in test_config.get('xperf_counters', []):
                     global_counters[c] = []
 
             if test_config['shutdown']:
@@ -142,16 +140,18 @@ class TTest(object):
 
             # instantiate an object to hold test results
             test_results = results.TestResults(
                 test_config,
                 global_counters
             )
 
             for i in range(test_config['cycles']):
+                logging.info("Running cycle %d/%d for %s test...",
+                             i+1, test_config['cycles'], test_config['name'])
 
                 # remove the browser  error file
                 mozfile.remove(browser_config['error_filename'])
 
                 # reinstall any file whose stability we need to ensure across
                 # the cycles
                 if test_config.get('reinstall', ''):
                     for keep in test_config['reinstall']:
@@ -294,17 +294,16 @@ class TTest(object):
                                                test_config['name'])
 
             # include global (cross-cycle) counters
             test_results.all_counter_results.extend(
                 [{key: value} for key, value in global_counters.items()]
             )
             for c in test_results.all_counter_results:
                 for key, value in c.items():
-                    print "COUNTER: %s" % key
-                    print value
+                    logging.debug("COUNTER %r: %s", key, value)
 
             # return results
             return test_results
 
         except Exception:
             self.testCleanup(browser_config, setup.profile_dir, test_config)
             raise
--- a/talos/utils.py
+++ b/talos/utils.py
@@ -30,17 +30,17 @@ class Timer(object):
     def elapsed(self):
         seconds = time.time() - self._start_time
         return time.strftime("%H:%M:%S", time.gmtime(seconds))
 
 
 def startLogger(levelChoice):
     # declare and define global logger object to send logging messages to
     log_levels = {'debug': logging.DEBUG, 'info': logging.INFO}
-    logging.basicConfig(format='%(levelname)s : %(message)s',
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s : %(message)s',
                         level=log_levels[levelChoice])
 
 
 def stamped_msg(msg_title, msg_action):
     """Prints a message to the console with a time stamp
     """
     time_format = "%a, %d %b %Y %H:%M:%S"
     msg_format = "%s: \n\t\t%s %s"