Bug 1264772 - Fixup writeruntimes.py; r=ahal
author: Dan Minor <dminor@mozilla.com>
Thu, 14 Apr 2016 16:01:14 -0400
changeset 331742 9bc41e65fdd2a1c575c7528066edab4bec2545ab
parent 331741 979bd96a0deea8f2dd7f5d341dce7e5dc46d11f2
child 331743 28c25f010f9d883753d207a67a7daf6729631eab
push id: 6048
push user: kmoir@mozilla.com
push date: Mon, 06 Jun 2016 19:02:08 +0000
treeherder: mozilla-beta@46d72a56c57d [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1264772 - Fixup writeruntimes.py; r=ahal

This fixes a few typos in the script and updates the README with instructions
on how to run it.

MozReview-Commit-ID: 7XVgTIFZR2M
--- a/testing/runtimes/README.md
+++ b/testing/runtimes/README.md
@@ -4,8 +4,16 @@ Test Runtimes
 These files contain test runtimes for various suites across different platforms. Each JSON file
 corresponds to a single test job in production and has the following format:
     { '<test id>': <average runtime> }
 These files are being used to normalize chunk durations so all chunks take roughly the same length
 of time. They are still experimental and their format and/or file structure are subject to change
 without notice.
+Generating a Test Runtime File
+The writeruntimes.py script can be used to generate a runtime file. You must
+specify the suite for which the runtimes are to be generated, e.g.
+    writeruntimes.py -s mochitest-media
--- a/testing/runtimes/writeruntimes.py
+++ b/testing/runtimes/writeruntimes.py
@@ -52,42 +52,42 @@ def write_runtimes(data, suite, indir=he
     runtimes = []
     for result in data.itervalues():
         duration = int(result * 1000) if result else 0
         if duration:
     threshold = runtimes[int(len(runtimes) * PERCENTILE)]
-    # split the durations into two groups; ommitted and specified
-    ommitted = []
+    # split the durations into two groups; omitted and specified
+    omitted = []
     specified = indata if indata else {}
     current_tests = []
     for test, duration in data.iteritems():
         duration = int(duration * 1000) if duration else 0
         if duration > 0 and duration < threshold:
-            ommitted.append(duration)
+            omitted.append(duration)
             if test in specified:
                 del specified[test]
         elif duration >= threshold and test != "automation.py":
             original = specified.get(test, 0)
             if not original or abs(original - duration) > (original/20):
                 # only write new data if it's > 20% different than original
                 specified[test] = duration
     # delete any test references no longer needed
     to_delete = []
     for test in specified:
         if test not in current_tests:
     for test in to_delete:
         del specified[test]
-    avg = int(sum(ommitted)/len(ommitted))
+    avg = int(sum(omitted)/len(omitted))
     results = {'excluded_test_average': avg,
                'runtimes': specified}
     with open(outfilename, 'w') as f:
         f.write(json.dumps(results, indent=2, sort_keys=True))
@@ -103,17 +103,17 @@ def cli(args=sys.argv[1:]):
         help="Comma separated list of platforms from which to generate data.")
     parser.add_argument('-s', '--suite', dest='suite', default=None,
         help="Suite for which to generate data.")
     args = parser.parse_args(args)
     if not args.suite:
-        raise ValueError("Must specify suite with the -u argument")
+        raise ValueError("Must specify suite with the -s argument")
     if ',' in args.suite:
         raise ValueError("Passing multiple suites is not supported")
     if args.platforms:
         args.platforms = args.platforms.split(',')
     data = query_activedata(args.suite, args.platforms)
     write_runtimes(data, args.suite, indir=args.indir, outdir=args.outdir)