Bug 1229379 - use mozlog in talos. r=jgraham,jmaher
☠☠ backed out by fbb48f4efd21 ☠☠
author       Julien Pagès <j.parkouss@gmail.com>
date         Thu, 17 Dec 2015 12:11:28 +0100
changeset    276846 4acf219571112bca9dce8b1802bc91c1febe1f4f
parent       276845 6d8293bceabf8d3d1c934e3121936581a7e74e42
child        276847 282f22cbf7ef8c719248f2bf871af4ee2de9ad5f
push id      29810
push user    cbook@mozilla.com
push date    Fri, 18 Dec 2015 14:24:54 +0000
treeherder   mozilla-central@c5cb194cc9cb
reviewers    jgraham, jmaher
bugs         1229379
milestone    46.0a1
Bug 1229379 - use mozlog in talos. r=jgraham,jmaher
testing/mozharness/mozharness/mozilla/testing/talos.py
testing/talos/requirements.txt
testing/talos/talos/cmdline.py
testing/talos/talos/config.py
testing/talos/talos/ffsetup.py
testing/talos/talos/output.py
testing/talos/talos/run_tests.py
testing/talos/talos/sps_profile.py
testing/talos/talos/startup_test/media/media_manager.py
testing/talos/talos/talos_process.py
testing/talos/talos/ttest.py
testing/talos/talos/utils.py
--- a/testing/mozharness/mozharness/mozilla/testing/talos.py
+++ b/testing/mozharness/mozharness/mozilla/testing/talos.py
@@ -377,17 +377,28 @@ class Talos(TestingMixin, MercurialScrip
             env['PYTHONPATH'] = self.talos_path
 
         self._flake8_check(parser)
 
         # sets a timeout for how long talos should run without output
         output_timeout = self.config.get('talos_output_timeout', 3600)
         # run talos tests
         run_tests = os.path.join(self.talos_path, 'talos', 'run_tests.py')
-        command = [python, run_tests, '--debug'] + options
+
+        mozlog_opts = ['--log-tbpl-level=debug']
+        if not self.run_local and 'suite' in self.config:
+            fname_pattern = '%s_%%s.log' % self.config['suite']
+            mozlog_opts.append('--log-errorsummary=%s'
+                               % os.path.join(env['MOZ_UPLOAD_DIR'],
+                                              fname_pattern % 'errorsummary'))
+            mozlog_opts.append('--log-raw=%s'
+                               % os.path.join(env['MOZ_UPLOAD_DIR'],
+                                              fname_pattern % 'raw'))
+
+        command = [python, run_tests] + options + mozlog_opts
         self.return_code = self.run_command(command, cwd=self.workdir,
                                             output_timeout=output_timeout,
                                             output_parser=parser,
                                             env=env)
         if parser.minidump_output:
             self.info("Looking at the minidump files for debugging purposes...")
             for item in parser.minidump_output:
                 self.run_command(["ls", "-l", item])
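
With this change the harness forwards mozlog's own command-line options to run_tests.py instead of the old talos --debug flag. As a rough illustration (not the exact production values; the suite name and upload path below are made up), the assembled command looks like:

    # hypothetical example of the final command; 'g1' and the upload dir are
    # illustrative only
    command = [
        python, run_tests,                    # plus the usual talos `options`
        '--log-tbpl-level=debug',
        '--log-errorsummary=/path/to/MOZ_UPLOAD_DIR/g1_errorsummary.log',
        '--log-raw=/path/to/MOZ_UPLOAD_DIR/g1_raw.log',
    ]
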
--- a/testing/talos/requirements.txt
+++ b/testing/talos/requirements.txt
@@ -1,8 +1,9 @@
+mozlog>=3.1
 mozcrash>=0.15
 mozfile>=1.2
 mozhttpd>=0.7
 mozinfo>=0.8
 mozprocess>=0.22
 mozversion>=1.3
 mozprofile>=0.25
 psutil>=3.1.1
--- a/testing/talos/talos/cmdline.py
+++ b/testing/talos/talos/cmdline.py
@@ -1,15 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import argparse
 import os
 
+from mozlog.commandline import add_logging_group
+
 
 class _StopAction(argparse.Action):
     def __init__(self, option_strings, dest=argparse.SUPPRESS,
                  default=argparse.SUPPRESS, help=None):
         super(_StopAction, self).__init__(
             option_strings=option_strings,
             dest=dest,
             default=default,
@@ -132,16 +134,16 @@ def create_parser(mach_interface=False):
     add_arg('--repository',
             help='Specify the url for the repository we are testing. '
                  'This will use the value found in application.ini if'
                  ' it is not specified.')
     add_arg('--print-tests', action=_ListTests,
             help="print available tests")
     add_arg('--print-suites', action=_ListSuite,
             help="list available suites")
-    add_arg('--debug', action='store_true',
-            help='show debug information')
+
+    add_logging_group(parser)
     return parser
 
 
 def parse_args(argv=None):
     parser = create_parser()
     return parser.parse_args(argv)
--- a/testing/talos/talos/config.py
+++ b/testing/talos/talos/config.py
@@ -1,16 +1,18 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import sys
 import os
 import copy
 
+from mozlog.commandline import setup_logging
+
 from talos import utils, test
 from talos.cmdline import parse_args
 
 
 class ConfigurationError(Exception):
     pass
 
 
@@ -443,22 +445,22 @@ def get_config(argv=None):
     if cli_opts.suite:
         # read the suite config, update the args
         try:
             suite_conf = suites_conf()[cli_opts.suite]
         except KeyError:
             raise ConfigurationError('No such suite: %r' % cli_opts.suite)
         argv += ['-a', ':'.join(suite_conf['tests'])]
         argv += suite_conf.get('talos_options', [])
-        # and reparse the args
-        cli_opts = parse_args(argv=argv)
+        # args needs to be reparsed now
     elif not cli_opts.activeTests:
         raise ConfigurationError('--activeTests or --suite required!')
 
     cli_opts = parse_args(argv=argv)
+    setup_logging("talos", cli_opts, {"tbpl": sys.stdout})
     config = copy.deepcopy(DEFAULTS)
     config.update(cli_opts.__dict__)
     for validate in CONF_VALIDATORS:
         validate(config)
     # remove None Values
     for k, v in config.items():
         if v is None:
             del config[k]
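
The two pieces above work together: add_logging_group() in cmdline.py registers mozlog's standard --log-<formatter> options (replacing the old --debug flag), and setup_logging() in config.py turns the parsed options into a structured logger, falling back to a tbpl-formatted stream on stdout when no --log-* option is passed. A minimal sketch of that flow, independent of talos and assuming mozlog>=3.1:

    import sys
    import argparse
    from mozlog.commandline import add_logging_group, setup_logging

    parser = argparse.ArgumentParser()
    add_logging_group(parser)   # adds --log-raw, --log-tbpl, --log-errorsummary, ...
    args = parser.parse_args(['--log-tbpl-level=debug'])

    # first argument is the logger name; the dict supplies the default
    # formatter/stream used when no --log-* option was given on the command line
    logger = setup_logging("talos", args, {"tbpl": sys.stdout})
    logger.info("structured logging configured")
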
--- a/testing/talos/talos/ffsetup.py
+++ b/testing/talos/talos/ffsetup.py
@@ -4,26 +4,30 @@
 
 """
 Set up a browser environment before running a test.
 """
 
 import os
 import re
 import tempfile
-import logging
 import mozfile
 from mozprocess import ProcessHandler
 from mozprofile.profile import Profile
 
+from mozlog import get_proxy_logger
+
 from talos import utils
 from talos.utils import TalosError
 from talos.sps_profile import SpsProfile
 
 
+LOG = get_proxy_logger()
+
+
 class FFSetup(object):
     """
     Initialize the browser environment before running a test.
 
     This prepares:
      - the environment vars for running the test in the browser,
        available via the instance member *env*.
      - the profile used to run the test, available via the
@@ -106,59 +110,61 @@ class FFSetup(object):
         command_args = utils.GenerateBrowserCommandLine(
             self.browser_config["browser_path"],
             self.browser_config["extra_args"],
             self.profile_dir,
             self.browser_config["init_url"]
         )
 
         def browser_log(line):
-            logging.debug('BROWSER_OUTPUT: %s', line)
+            LOG.process_output(browser.pid, line)
 
         browser = ProcessHandler(command_args, env=self.env,
                                  processOutputLine=browser_log)
         browser.run()
+        LOG.process_start(browser.pid, ' '.join(command_args))
         try:
-            browser.wait()
+            exit_code = browser.wait()
         except KeyboardInterrupt:
             browser.kill()
             raise
 
+        LOG.process_exit(browser.pid, exit_code)
         results_raw = '\n'.join(browser.output)
 
         if not self.PROFILE_REGEX.search(results_raw):
-            logging.info("Could not find %s in browser output",
-                         self.PROFILE_REGEX.pattern)
-            logging.info("Raw results:%s", results_raw)
+            LOG.info("Could not find %s in browser output"
+                     % self.PROFILE_REGEX.pattern)
+            LOG.info("Raw results:%s" % results_raw)
             raise TalosError("browser failed to close after being initialized")
 
     def _init_sps_profile(self):
         upload_dir = os.getenv('MOZ_UPLOAD_DIR')
         if self.test_config.get('sps_profile') and not upload_dir:
-            logging.critical("Profiling ignored because MOZ_UPLOAD_DIR was not"
-                             " set")
+            LOG.critical("Profiling ignored because MOZ_UPLOAD_DIR was not"
+                         " set")
         if upload_dir and self.test_config.get('sps_profile'):
             self.sps_profile = SpsProfile(upload_dir,
                                           self.browser_config,
                                           self.test_config)
             self.sps_profile.update_env(self.env)
 
     def clean(self):
         mozfile.remove(self._tmp_dir)
         if self.sps_profile:
             self.sps_profile.clean()
 
     def __enter__(self):
-        logging.info('Initialising browser for %s test...',
-                     self.test_config['name'])
+        LOG.info('Initialising browser for %s test...'
+                 % self.test_config['name'])
         self._init_env()
         self._init_profile()
         try:
             self._run_profile()
         except:
             self.clean()
             raise
         self._init_sps_profile()
-        logging.info('Browser initialized.')
+        LOG.info('Browser initialized.')
         return self
 
     def __exit__(self, type, value, tb):
         self.clean()
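
The remaining files all switch from the stdlib logging module to mozlog's proxy logger: get_proxy_logger() can be called at import time and forwards to whatever structured logger setup_logging() later configures. Structured actions such as process_start/process_output/process_exit carry the browser pid, so formatters (tbpl, raw, errorsummary) can attribute output to the right process. A small self-contained sketch of that pattern (the explicit logger setup below is normally done by setup_logging(); the pid and lines are illustrative):

    import sys
    from mozlog import get_proxy_logger
    from mozlog.structuredlog import StructuredLogger, set_default_logger
    from mozlog.handlers import StreamHandler
    from mozlog.formatters import TbplFormatter

    LOG = get_proxy_logger()          # safe before any logger exists

    logger = StructuredLogger("example")
    logger.add_handler(StreamHandler(sys.stdout, TbplFormatter()))
    set_default_logger(logger)        # the proxy now resolves to `logger`

    LOG.process_start(1234, "firefox -profile /tmp/profile")
    LOG.process_output(1234, "some line of browser output")
    LOG.process_exit(1234, 0)
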
--- a/testing/talos/talos/output.py
+++ b/testing/talos/talos/output.py
@@ -2,25 +2,27 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 """output formats for Talos"""
 
 import filter
 import json
-import logging
 import post_file
 import time
 import utils
 
+from StringIO import StringIO
+from mozlog import get_proxy_logger
+
 # NOTE: we have a circular dependecy with output.py when we import results
 import results as TalosResults
 
-from StringIO import StringIO
+LOG = get_proxy_logger()
 
 
 def filesizeformat(bytes):
     """
     Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102
     bytes, etc).
     """
     bytes = float(bytes)
@@ -103,24 +105,24 @@ class Output(object):
         results = [i for i, j in val_list]
         score = 100 * filter.geometric_mean(results)
         return score
 
     @classmethod
     def JS_Metric(cls, val_list):
         """v8 benchmark score"""
         results = [i for i, j in val_list]
-        logging.info("javascript benchmark")
+        LOG.info("javascript benchmark")
         return sum(results)
 
     @classmethod
     def CanvasMark_Metric(cls, val_list):
         """CanvasMark benchmark score (NOTE: this is identical to JS_Metric)"""
         results = [i for i, j in val_list]
-        logging.info("CanvasMark benchmark")
+        LOG.info("CanvasMark benchmark")
         return sum(results)
 
 
 class GraphserverOutput(Output):
 
     retries = 5  # number of times to attempt to contact graphserver
     info_format = ['title', 'testname', 'branch_name', 'sourcestamp',
                    'buildid', 'date']
@@ -145,28 +147,28 @@ class GraphserverOutput(Output):
             branch_name=self.results.browser_config['branch_name'],
             sourcestamp=self.results.browser_config['sourcestamp'],
             buildid=self.results.browser_config['buildid'],
             browser_name=self.results.browser_config['browser_name'],
             browser_version=self.results.browser_config['browser_version']
         )
 
         for test in self.results.results:
-            logging.debug("Working with test: %s", test.name())
+            LOG.debug("Working with test: %s" % test.name())
 
             # get full name of test
             testname = test.name()
             if test.format == 'tpformat':
                 # for some reason, we append the test extension to tp results
                 # but not ts
                 # http://hg.mozilla.org/build/talos/file/170c100911b6/talos
                 # /run_tests.py#l176
                 testname += test.extension()
 
-            logging.debug("Generating results file: %s" % test.name())
+            LOG.debug("Generating results file: %s" % test.name())
 
             # HACK: when running xperf, we upload xperf counters to the graph
             # server but we do not want to
             # upload the test results as they will confuse the graph server
             if not (test.format == 'tpformat' and test.using_xperf):
                 vals = []
                 for result in test.results:
                     filtered_val = result.values(testname,
@@ -179,17 +181,17 @@ class GraphserverOutput(Output):
             # counter results
             for cd in test.all_counter_results:
                 for counter_type, values in cd.items():
                     # get the counter name
                     counterName = '%s_%s' % (test.name(),
                                              self.shortName(counter_type))
                     if not values:
                         # failed to collect any data for this counter
-                        logging.error(
+                        LOG.error(
                             "No results collected for: " + counterName
                         )
 # NOTE: we are not going to enforce this warning for now as this happens too
 # frequently: bugs 803413, 802475, 805925
 #                        raise utils.TalosError("Unable to proceed with missing
 # counter '%s'" % counterName)
 # (jhammel: we probably should do this in e.g. results.py vs in
 # graphserver-specific code anyway)
@@ -213,17 +215,17 @@ class GraphserverOutput(Output):
                     # append test name extension but only for tpformat tests
                     if test.format == 'tpformat':
                         counterName += test.extension()
 
                     info = info_dict.copy()
                     info['testname'] = counterName
 
                     # append the counter string
-                    logging.info(
+                    LOG.info(
                         "Generating results file: %s" % counterName)
                     result_strings.append(self.construct_results(vals, **info))
 
         return result_strings
 
     def responsiveness_test(self, testname):
         """returns if the test is a responsiveness test"""
         # XXX currently this just looks for the string
@@ -271,51 +273,52 @@ class GraphserverOutput(Output):
         if average is not None:
             # write some kind of average
             buffer.write("%s\n" % average)
         else:
             for i, (val, page) in enumerate(vals):
                 try:
                     buffer.write("%d,%.2f,%s\n" % (i, float(val), page))
                 except ValueError:
-                    logging.info(
+                    LOG.info(
                         "We expected a numeric value and recieved '%s' instead"
                         % val
                     )
                     pass
 
         buffer.write("END")
         return buffer.getvalue()
 
     def process_Request(self, post):
         """get links from the graphserver response"""
         links = ""
         for line in post.splitlines():
             if line.find("RETURN\t") > -1:
                 line = line.replace("RETURN\t", "")
                 links += line + '\n'
-            logging.debug("process_Request line: %s", line)
+            LOG.debug("process_Request line: %s" % line)
         if not links:
             raise utils.TalosError("send failed, graph server says:\n%s"
                                    % post)
         return links
 
     def post(self, results, server, path, scheme, tbpl_output):
         """post results to the graphserver"""
 
         links = []
         wait_time = 5  # number of seconds between each attempt
 
         for index, data_string in enumerate(results):
             times = 0
             msg = ""
             while times < self.retries:
-                logging.info(
-                    "Posting result %d of %d to %s://%s%s, attempt %d",
-                    index, len(results), scheme, server, path, times)
+                LOG.info(
+                    "Posting result %d of %d to %s://%s%s, attempt %d" % (
+                        index, len(results), scheme, server, path, times)
+                )
                 try:
                     links.append(self.process_Request(
                         post_file.post_multipart(server, path,
                                                  files=[("filename",
                                                          "data_string",
                                                          data_string)])))
                     break
                 except utils.TalosError, e:
@@ -399,17 +402,17 @@ class PerfherderOutput(Output):
         """
 
         # parse the results url
         results_url_split = utils.urlsplit(results_url)
         results_scheme, results_server, results_path, _, _ = results_url_split
 
         # This is the output that treeherder expects to find when parsing the
         # log file
-        logging.info("PERFHERDER_DATA: %s" % json.dumps(results))
+        LOG.info("PERFHERDER_DATA: %s" % json.dumps(results))
         if results_scheme in ('file'):
             json.dump(results, file(results_path, 'w'), indent=2,
                       sort_keys=True)
 
     def post(self, results, server, path, scheme, tbpl_output):
         """conform to current code- not needed for perfherder"""
         pass
 
--- a/testing/talos/talos/run_tests.py
+++ b/testing/talos/talos/run_tests.py
@@ -1,31 +1,33 @@
 #!/usr/bin/env python
 
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import mozversion
-import logging
 import os
 import sys
 import time
 import traceback
 import urllib
 import utils
 import mozhttpd
 
+from mozlog import get_proxy_logger
+
 from talos.results import TalosResults
 from talos.ttest import TTest
 from talos.utils import TalosError, TalosCrash, TalosRegression
 from talos.config import get_configs, ConfigurationError
 
 # directory of this file
 here = os.path.dirname(os.path.realpath(__file__))
+LOG = get_proxy_logger()
 
 
 def useBaseTestDefaults(base, tests):
     for test in tests:
         for item in base:
             if item not in test:
                 test[item] = base[item]
                 if test[item] is None:
@@ -62,17 +64,17 @@ def buildCommandLine(test):
     # XXX we should actually return the list but since we abuse
     # the url as a command line flag to pass to firefox all over the place
     # will just make a string for now
     return ' '.join(url)
 
 
 def setup_webserver(webserver):
     """use mozhttpd to setup a webserver"""
-    logging.info("starting webserver on %r" % webserver)
+    LOG.info("starting webserver on %r" % webserver)
 
     host, port = webserver.split(':')
     return mozhttpd.MozHttpd(host=host, port=int(port), docroot=here)
 
 
 def run_tests(config, browser_config):
     """Runs the talos tests on the given configuration and generates a report.
     """
@@ -148,18 +150,18 @@ def run_tests(config, browser_config):
             browser_config['sourcestamp'] = 'develop'
 
     # get test date in seconds since epoch
     if testdate:
         date = int(time.mktime(time.strptime(testdate,
                                              '%a, %d %b %Y %H:%M:%S GMT')))
     else:
         date = int(time.time())
-    logging.debug("using testdate: %d", date)
-    logging.debug("actual date: %d", int(time.time()))
+    LOG.debug("using testdate: %d" % date)
+    LOG.debug("actual date: %d" % int(time.time()))
 
     # results container
     talos_results = TalosResults(title=title,
                                  date=date,
                                  browser_config=browser_config)
 
     # results links
     if not browser_config['develop']:
@@ -180,44 +182,44 @@ def run_tests(config, browser_config):
 
     httpd = setup_webserver(browser_config['webserver'])
     httpd.start()
 
     testname = None
     try:
         # run the tests
         timer = utils.Timer()
-        logging.info("Starting test suite %s", title)
+        LOG.info("Starting test suite %s" % title)
         for test in tests:
             testname = test['name']
             testtimer = utils.Timer()
-            logging.info("Starting test %s", testname)
+            LOG.info("Starting test %s" % testname)
 
             mytest = TTest()
             talos_results.add(mytest.runTest(browser_config, test))
 
-            logging.info("Completed test %s (%s)", testname,
-                         testtimer.elapsed())
+            LOG.info("Completed test %s (%s)"
+                     % (testname,  testtimer.elapsed()))
 
     except TalosRegression:
-        logging.error("Detected a regression for %s", testname)
+        LOG.error("Detected a regression for %s" % testname)
         # by returning 1, we report an orange to buildbot
         # http://docs.buildbot.net/latest/developer/results.html
         return 1
     except (TalosCrash, TalosError):
         # NOTE: if we get into this condition, talos has an internal
         # problem and cannot continue
         #       this will prevent future tests from running
         traceback.print_exception(*sys.exc_info())
         # indicate a failure to buildbot, turn the job red
         return 2
     finally:
         httpd.stop()
 
-    logging.info("Completed test suite (%s)", timer.elapsed())
+    LOG.info("Completed test suite (%s)" % timer.elapsed())
 
     # output results
     if results_urls:
         talos_results.output(results_urls)
         if browser_config['develop']:
             print
             print ("Thanks for running Talos locally. Results are in"
                    " %s and %s" % (results_urls['results_urls'],
@@ -228,14 +230,13 @@ def run_tests(config, browser_config):
     return 0
 
 
 def main(args=sys.argv[1:]):
     try:
         config, browser_config = get_configs()
     except ConfigurationError, exc:
         sys.exit("ERROR: %s" % exc)
-    utils.startLogger('debug' if config['debug'] else 'info')
     sys.exit(run_tests(config, browser_config))
 
 
 if __name__ == '__main__':
     main()
--- a/testing/talos/talos/sps_profile.py
+++ b/testing/talos/talos/sps_profile.py
@@ -3,24 +3,26 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 """
 module to handle sps profilling.
 """
 
 import os
 import tempfile
-import logging
 import zipfile
 import json
-
 import mozfile
 
+from mozlog import get_proxy_logger
+
 from talos.profiler import symbolication, sps
 
+LOG = get_proxy_logger()
+
 
 class SpsProfile(object):
     """
     Handle sps profilling.
 
     This allow to collect sps profiling data and to zip results in one file.
     """
     def __init__(self, upload_dir, browser_config, test_config):
@@ -37,30 +39,30 @@ class SpsProfile(object):
         sps_profile_threads = 'GeckoMain,Compositor'
 
         # Make sure no archive already exists in the location where
         # we plan to output our profiler archive
         self.profile_arcname = os.path.join(
             self.upload_dir,
             "profile_{0}.sps.zip".format(test_config['name'])
         )
-        logging.info("Clearing archive {0}".format(self.profile_arcname))
+        LOG.info("Clearing archive {0}".format(self.profile_arcname))
         mozfile.remove(self.profile_arcname)
 
         self.symbol_paths = {
             'FIREFOX': tempfile.mkdtemp(),
             'THUNDERBIRD': tempfile.mkdtemp(),
             'WINDOWS': tempfile.mkdtemp()
         }
 
-        logging.info("Activating Gecko Profiling. Temp. profile dir:"
-                     " {0}, interval: {1}, entries: {2}"
-                     .format(sps_profile_dir,
-                             sps_profile_interval,
-                             sps_profile_entries))
+        LOG.info("Activating Gecko Profiling. Temp. profile dir:"
+                 " {0}, interval: {1}, entries: {2}"
+                 .format(sps_profile_dir,
+                         sps_profile_interval,
+                         sps_profile_entries))
 
         self.profiling_info = {
             "sps_profile_interval": sps_profile_interval,
             "sps_profile_entries": sps_profile_entries,
             "sps_profile_dir": sps_profile_dir,
             "sps_profile_threads": sps_profile_threads
         }
 
@@ -89,25 +91,27 @@ class SpsProfile(object):
             with open(profile_path, 'r') as profile_file:
                 profile = json.load(profile_file)
             symbolicator.dump_and_integrate_missing_symbols(
                 profile,
                 missing_symbols_zip)
             symbolicator.symbolicate_profile(profile)
             sps.save_profile(profile, profile_path)
         except MemoryError:
-            logging.exception(
+            LOG.critical(
                 "Ran out of memory while trying"
                 " to symbolicate profile {0} (cycle {1})"
-                .format(profile_path, cycle)
+                .format(profile_path, cycle),
+                exc_info=True
             )
         except Exception:
-            logging.exception("Encountered an exception during profile"
-                              " symbolication {0} (cycle {1})"
-                              .format(profile_path, cycle))
+            LOG.critical("Encountered an exception during profile"
+                         " symbolication {0} (cycle {1})"
+                         .format(profile_path, cycle),
+                         exc_info=True)
 
     def symbolicate(self, cycle):
         """
         Symbolicate sps profiling data for one cycle.
 
         :param cycle: the number of the cycle of the test currently run.
         """
         symbolicator = symbolication.ProfileSymbolicator({
@@ -177,24 +181,24 @@ class SpsProfile(object):
                 # 'profile_tscrollx/iframe.svg/cycle_0.sps'.
                 cycle_name = "cycle_{0}.sps".format(cycle)
                 path_in_zip = \
                     os.path.join(
                         "profile_{0}".format(self.test_config['name']),
                         testname,
                         cycle_name
                     )
-                logging.info(
+                LOG.info(
                     "Adding profile {0} to archive {1}"
                     .format(path_in_zip, self.profile_arcname)
                 )
                 try:
                     arc.write(profile_path, path_in_zip)
                 except Exception:
-                    logging.exception(
+                    LOG.exception(
                         "Failed to copy profile {0} as {1} to"
                         " archive {2}".format(profile_path,
                                               path_in_zip,
                                               self.profile_arcname)
                     )
 
     def clean(self):
         """
--- a/testing/talos/talos/startup_test/media/media_manager.py
+++ b/testing/talos/talos/startup_test/media/media_manager.py
@@ -1,21 +1,24 @@
 # vim:se?t ts=2 sw=2 sts=2 et cindent:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 import sys
 import platform
 import optparse
-import logging
 import mozhttpd
 # media test utilities
 import media_utils
 
+from mozlog import get_proxy_logger
+
+LOG = get_proxy_logger()
+
 """
 MediaManager serves as entry point for running media performance tests.
 It is responsible for the following
 1. Manage life-cycle of the mozHttpdServer and browser instance
 2. Provide RESTP API handlers for the Media tests
 3. REST API design follows the pattern
      <resource>/<component>/<operation>
      Example: audio recorder functionality
@@ -129,19 +132,19 @@ def run_server(doc_root):
         port=16932,
         docroot=doc_root,
         urlhandlers=[
             {'method': 'GET', 'path': '/audio/', 'function': parseGETUrl},
             {'method': 'GET', 'path': '/server/?', 'function': parseGETUrl}
         ]
     )
 
-    logging.info("Server %s at %s:%s",
-                 httpd_server.docroot, httpd_server.host,
-                 httpd_server.port)
+    LOG.info("Server %s at %s:%s" % (httpd_server.docroot,
+                                     httpd_server.host,
+                                     httpd_server.port))
     ObjectDb.httpd_server = httpd_server
     httpd_server.start()
     return httpd_server
 
 if __name__ == "__main__":
     # Linux platform is supported
     if not platform.system() == "Linux":
         print "This version of the tool supports only Linux"
--- a/testing/talos/talos/talos_process.py
+++ b/testing/talos/talos/talos_process.py
@@ -1,66 +1,70 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import time
-import logging
 import psutil
 import mozcrash
 from mozprocess import ProcessHandler
 from threading import Event
 
+from mozlog import get_proxy_logger
+
 from utils import TalosError
 
+LOG = get_proxy_logger()
+
 
 class ProcessContext(object):
     """
     Store useful results of the browser execution.
     """
     def __init__(self):
         self.output = None
         self.process = None
 
     @property
     def pid(self):
         return self.process and self.process.pid
 
     def kill_process(self):
         if self.process and self.process.is_running():
-            logging.debug("Terminating %s", self.process)
+            LOG.debug("Terminating %s" % self.process)
             self.process.terminate()
             try:
                 self.process.wait(3)
             except psutil.TimeoutExpired:
                 self.process.kill()
                 # will raise TimeoutExpired if unable to kill
                 self.process.wait(3)
 
 
 class Reader(object):
     def __init__(self, event):
         self.output = []
         self.got_end_timestamp = False
         self.got_timeout = False
         self.timeout_message = ''
         self.event = event
+        self.proc = None
 
     def __call__(self, line):
         if line.find('__endTimestamp') != -1:
             self.got_end_timestamp = True
             self.event.set()
         elif line == 'TART: TIMEOUT':
             self.got_timeout = True
             self.timeout_message = 'TART'
             self.event.set()
 
         if not (line.startswith('JavaScript error:') or
                 line.startswith('JavaScript warning:')):
-            logging.debug('BROWSER_OUTPUT: %s', line)
+            LOG.process_output(self.proc.pid, line)
             self.output.append(line)
 
 
 def run_browser(command, minidump_dir, timeout=None, on_started=None,
                 **kwargs):
     """
     Run the browser using the given `command`.
 
@@ -89,33 +93,35 @@ def run_browser(command, minidump_dir, t
     wait_for_quit_timeout = 5
     event = Event()
     reader = Reader(event)
 
     kwargs['storeOutput'] = False
     kwargs['processOutputLine'] = reader
     kwargs['onFinish'] = event.set
     proc = ProcessHandler(command, **kwargs)
+    reader.proc = proc
     proc.run()
+    LOG.process_start(proc.pid, ' '.join(command))
     try:
         context.process = psutil.Process(proc.pid)
         if on_started:
             on_started(context.process)
         # wait until we saw __endTimestamp in the proc output,
         # or the browser just terminated - or we have a timeout
         if not event.wait(timeout):
             # try to extract the minidump stack if the browser hangs
             mozcrash.kill_and_get_minidump(proc.pid, minidump_dir)
             raise TalosError("timeout")
         if reader.got_end_timestamp:
             for i in range(1, wait_for_quit_timeout):
                 if proc.wait(1) is not None:
                     break
             if proc.poll() is None:
-                logging.info(
+                LOG.info(
                     "Browser shutdown timed out after {0} seconds, terminating"
                     " process.".format(wait_for_quit_timeout)
                 )
         elif reader.got_timeout:
             raise TalosError('TIMEOUT: %s' % reader.timeout_message)
     finally:
         # this also handle KeyboardInterrupt
         # ensure early the process is really terminated
@@ -124,11 +130,11 @@ def run_browser(command, minidump_dir, t
 
     reader.output.append(
         "__startBeforeLaunchTimestamp%d__endBeforeLaunchTimestamp"
         % first_time)
     reader.output.append(
         "__startAfterTerminationTimestamp%d__endAfterTerminationTimestamp"
         % (int(time.time()) * 1000))
 
-    logging.info("Browser exited with error code: {0}".format(return_code))
+    LOG.process_exit(proc.pid, return_code)
     context.output = reader.output
     return context
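
One detail worth noting above: the Reader now keeps a reference to the ProcessHandler (reader.proc = proc, assigned before proc.run()) so its line callback can tag browser output with the pid. A stripped-down sketch of that wiring, using a trivial command in place of the browser:

    from mozprocess import ProcessHandler
    from mozlog import get_proxy_logger

    LOG = get_proxy_logger()

    class Reader(object):
        def __init__(self):
            self.proc = None              # set once the ProcessHandler exists

        def __call__(self, line):
            LOG.process_output(self.proc.pid, line)

    reader = Reader()
    proc = ProcessHandler(['echo', 'hello'], processOutputLine=reader)
    reader.proc = proc                    # assign before run() so pid is available
    proc.run()
    LOG.process_start(proc.pid, 'echo hello')
    proc.wait()
    LOG.process_exit(proc.pid, proc.poll())
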
--- a/testing/talos/talos/ttest.py
+++ b/testing/talos/talos/ttest.py
@@ -17,23 +17,26 @@ import sys
 import platform
 import results
 import subprocess
 import utils
 import mozcrash
 import talosconfig
 import shutil
 import mozfile
-import logging
+
+from mozlog import get_proxy_logger
 
 from talos.utils import TalosCrash, TalosRegression
 from talos.talos_process import run_browser
 from talos.ffsetup import FFSetup
 from talos.cmanager import CounterManagement
 
+LOG = get_proxy_logger()
+
 
 class TTest(object):
     platform_type = utils.PLATFORM_TYPE
 
     def check_for_crashes(self, browser_config, minidump_dir, test_name):
         # check for minidumps
         found = mozcrash.check_for_crashes(minidump_dir,
                                            browser_config['symbols_path'],
@@ -51,17 +54,17 @@ class TTest(object):
         Args:
             browser_config:  Dictionary of configuration options for the
                              browser (paths, prefs, etc)
             test_config   :  Dictionary of configuration for the given
                              test (url, cycles, counters, etc)
 
         """
 
-        logging.debug("operating with platform_type : %s", self.platform_type)
+        LOG.debug("operating with platform_type : %s" % self.platform_type)
         with FFSetup(browser_config, test_config) as setup:
             return self._runTest(browser_config, test_config, setup)
 
     def _runTest(self, browser_config, test_config, setup):
         minidump_dir = os.path.join(setup.profile_dir, 'minidumps')
         counters = test_config.get(self.platform_type + 'counters', [])
         resolution = test_config['resolution']
 
@@ -98,31 +101,31 @@ class TTest(object):
 
         # instantiate an object to hold test results
         test_results = results.TestResults(
             test_config,
             global_counters
         )
 
         for i in range(test_config['cycles']):
-            logging.info("Running cycle %d/%d for %s test...",
-                         i+1, test_config['cycles'], test_config['name'])
+            LOG.info("Running cycle %d/%d for %s test..."
+                     % (i+1, test_config['cycles'], test_config['name']))
 
             # remove the browser  error file
             mozfile.remove(browser_config['error_filename'])
 
             # reinstall any file whose stability we need to ensure across
             # the cycles
             if test_config.get('reinstall', ''):
                 for keep in test_config['reinstall']:
                     origin = os.path.join(test_config['profile_path'],
                                           keep)
                     dest = os.path.join(setup.profile_dir, keep)
-                    logging.debug("Reinstalling %s on top of %s", origin,
-                                  dest)
+                    LOG.debug("Reinstalling %s on top of %s"
+                              % (origin, dest))
                     shutil.copy(origin, dest)
 
             # Run the test
             timeout = test_config.get('timeout', 7200)  # 2 hours default
             if setup.sps_profile:
                 # When profiling, give the browser some extra time
                 # to dump the profile.
                 timeout += 5 * 60
@@ -240,12 +243,12 @@ class TTest(object):
                                    test_config['name'])
 
         # include global (cross-cycle) counters
         test_results.all_counter_results.extend(
             [{key: value} for key, value in global_counters.items()]
         )
         for c in test_results.all_counter_results:
             for key, value in c.items():
-                logging.debug("COUNTER %r: %s", key, value)
+                LOG.debug("COUNTER %r: %s" % (key, value))
 
         # return results
         return test_results
--- a/testing/talos/talos/utils.py
+++ b/testing/talos/talos/utils.py
@@ -4,23 +4,25 @@
 
 """Utility functions for Talos"""
 
 import os
 import time
 import urlparse
 import string
 import urllib
-import logging
 import json
 import re
 import platform
 
+from mozlog import get_proxy_logger
+
 # directory of this file for use with interpolatePath()
 here = os.path.dirname(os.path.realpath(__file__))
+LOG = get_proxy_logger()
 
 
 def _get_platform():
     # get the platform we're interested in. Note that the values
     # are used in TTest historically, this is why they are not really friendly.
     # TODO: give some user friendly values
     if platform.system() == "Linux":
         return 'linux_'
@@ -48,23 +50,16 @@ class Timer(object):
     def start(self):
         self._start_time = time.time()
 
     def elapsed(self):
         seconds = time.time() - self._start_time
         return time.strftime("%H:%M:%S", time.gmtime(seconds))
 
 
-def startLogger(levelChoice):
-    # declare and define global logger object to send logging messages to
-    log_levels = {'debug': logging.DEBUG, 'info': logging.INFO}
-    logging.basicConfig(format='%(asctime)-15s %(levelname)s : %(message)s',
-                        level=log_levels[levelChoice])
-
-
 class TalosError(Exception):
     "Errors found while running the talos harness."
 
 
 class TalosRegression(Exception):
     """When a regression is detected at runtime, report it properly
        Currently this is a simple definition so we can detect the class type
     """
@@ -174,17 +169,16 @@ def GenerateBrowserCommandLine(browser_p
             url += '?' + urllib.urlencode(profiling_info)
 
     command_args.extend(url.split(' '))
 
     # Handle media performance tests
     if url.find('media_manager.py') != -1:
         command_args = url.split(' ')
 
-    logging.debug("command line: %s", ' '.join(command_args))
     return command_args
 
 
 def indexed_items(itr):
     """
     Generator that allows us to figure out which item is the last one so
     that we can serialize this data properly
     """