Bug 1253341 - support --framework for talos. r=wlach
author Joel Maher <jmaher@mozilla.com>
Wed, 16 Mar 2016 08:24:45 -0700
changeset 289023 101e18ca4a12f984d5256fa21c6c8d8a2036dc47
parent 289022 a5ded32ae90632b88d807d40d175d23d19a0bfc5
child 289024 4edfc0a8841211e16ecee6a716eccb06b262704a
push id 30096
push user cbook@mozilla.com
push date Thu, 17 Mar 2016 10:00:34 +0000
treeherder mozilla-central@fefe2bc3e54f [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers wlach
bugs 1253341
milestone 48.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1253341 - support --framework for talos. r=wlach MozReview-Commit-ID: 881jtz1ljkj
testing/mozharness/mozharness/mozilla/testing/talos.py
testing/talos/talos/cmdline.py
testing/talos/talos/config.py
testing/talos/talos/output.py
testing/talos/talos/results.py
testing/talos/talos/ttest.py
--- a/testing/mozharness/mozharness/mozilla/testing/talos.py
+++ b/testing/mozharness/mozharness/mozilla/testing/talos.py
@@ -165,17 +165,22 @@ class Talos(TestingMixin, MercurialScrip
     # We accept some configuration options from the try commit message in the format mozharness: <options>
     # Example try commit message:
     #   mozharness: --spsProfile try: <stuff>
     def query_sps_profile_options(self):
         sps_results = []
         if self.buildbot_config:
             # this is inside automation
             # now let's see if we added spsProfile specs in the commit message
-            junk, junk, opts = self.buildbot_config['sourcestamp']['changes'][-1]['comments'].partition('mozharness:')
+            try:
+                junk, junk, opts = self.buildbot_config['sourcestamp']['changes'][-1]['comments'].partition('mozharness:')
+            except IndexError:
+                # when we don't have comments on changes (bug 1255187)
+                opts = None
+
             if opts:
               opts = re.sub(r'\w+:.*', '', opts).strip().split(' ')
               if "--spsProfile" in opts:
                   # overwrite whatever was set here.
                   self.sps_profile = True
               try:
                     idx = opts.index('--spsProfileInterval')
                     if len(opts) > idx + 1:
@@ -232,16 +237,18 @@ class Talos(TestingMixin, MercurialScrip
         if binary_path.endswith('.exe'):
             binary_path = binary_path[:-4]
         # options overwritten from **kw
         kw_options = {'executablePath': binary_path}
         if 'suite' in self.config:
             kw_options['suite'] = self.config['suite']
         if self.config.get('title'):
             kw_options['title'] = self.config['title']
+            if kw_options['title'].startswith('tst-linux64-spot'):
+                kw_options['framework'] = 'talos-aws'
         if self.config.get('branch'):
             kw_options['branchName'] = self.config['branch']
         if self.symbols_path:
             kw_options['symbolsPath'] = self.symbols_path
         kw_options.update(kw)
         # talos expects tests to be in the format (e.g.) 'ts:tp5:tsvg'
         tests = kw_options.get('activeTests')
         if tests and not isinstance(tests, basestring):
--- a/testing/talos/talos/cmdline.py
+++ b/testing/talos/talos/cmdline.py
@@ -129,16 +129,20 @@ def create_parser(mach_interface=False):
     add_arg('--sourcestamp',
             help='Specify the hg revision or sourcestamp for the changeset'
                  ' we are testing.  This will use the value found in'
                  ' application.ini if it is not specified.')
     add_arg('--repository',
             help='Specify the url for the repository we are testing. '
                  'This will use the value found in application.ini if'
                  ' it is not specified.')
+    add_arg('--framework',
+            help='Will post to the specified framework for Perfherder. '
+                 'Default "talos".  Used primarily for experiments on '
+                 'new platforms')
     add_arg('--print-tests', action=_ListTests,
             help="print available tests")
     add_arg('--print-suites', action=_ListSuite,
             help="list available suites")
 
     add_logging_group(parser)
     return parser
 
--- a/testing/talos/talos/config.py
+++ b/testing/talos/talos/config.py
@@ -409,16 +409,17 @@ def get_browser_config(config):
     required = ('preferences', 'extensions', 'browser_path', 'browser_wait',
                 'extra_args', 'buildid', 'env', 'init_url', 'webserver')
     optional = {'bcontroller_config': '${talos}/bcontroller.json',
                 'branch_name': '',
                 'child_process': 'plugin-container',
                 'develop': False,
                 'e10s': False,
                 'process': '',
+                'framework': 'talos',
                 'repository': None,
                 'sourcestamp': None,
                 'symbols_path': None,
                 'test_name_extension': '',
                 'test_timeout': 1200,
                 'xperf_path': None,
                 'error_filename': None,
                 }
--- a/testing/talos/talos/output.py
+++ b/testing/talos/talos/output.py
@@ -156,17 +156,17 @@ class PerfherderOutput(Output):
             return filter.geometric_mean([i for i, j in vals])
         else:
             return filter.mean([i for i, j in vals])
 
     def __call__(self):
         suites = []
         test_results = {
             'framework': {
-                'name': 'talos',
+                'name': self.results.results[0].framework,
             },
             'suites': suites,
         }
 
         for test in self.results.results:
             # serialize test results
             tsresult = None
             if not test.using_xperf:
--- a/testing/talos/talos/results.py
+++ b/testing/talos/talos/results.py
@@ -79,23 +79,23 @@ class TalosResults(object):
 
         if tbpl_output:
             print "TinderboxPrint: TalosResult: %s" % json.dumps(tbpl_output)
 
 
 class TestResults(object):
     """container object for all test results across cycles"""
 
-    def __init__(self, test_config, global_counters=None, extensions=None):
+    def __init__(self, test_config, global_counters=None, framework=None):
         self.results = []
         self.test_config = test_config
         self.format = None
         self.global_counters = global_counters or {}
         self.all_counter_results = []
-        self.extensions = extensions
+        self.framework = framework
         self.using_xperf = False
 
     def name(self):
         return self.test_config['name']
 
     def extension(self):
         return self.test_config['test_name_extension']
 
--- a/testing/talos/talos/ttest.py
+++ b/testing/talos/talos/ttest.py
@@ -96,17 +96,18 @@ class TTest(object):
             setup.env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
             setup.env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = '20'
             setup.env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = '10'
             global_counters['responsiveness'] = []
 
         # instantiate an object to hold test results
         test_results = results.TestResults(
             test_config,
-            global_counters
+            global_counters,
+            browser_config.get('framework')
         )
 
         for i in range(test_config['cycles']):
             LOG.info("Running cycle %d/%d for %s test..."
                      % (i+1, test_config['cycles'], test_config['name']))
 
             # remove the browser  error file
             mozfile.remove(browser_config['error_filename'])