Bug 1294466 - Check that the output of all JITFLAGS is the same on jit-tests. r=terrence
author: Jinank Jain <jinank94@gmail.com>
Mon, 15 Aug 2016 13:04:57 -0400
changeset 350840 772ec8c912ac30334e69ecc123a8ba9229334925
parent 350839 6a868ef45569757a81c1e842e9e73a783d36308c
child 350841 a0085eb5ffe746352ea4cf4f357344443e8ddf90
push id: 6570
push user: raliiev@mozilla.com
push date: Mon, 14 Nov 2016 12:26:13 +0000
treeherder: mozilla-beta@f455459b2ae5 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: terrence
bugs: 1294466
milestone: 51.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1294466 - Check that the output of all JITFLAGS is the same on jit-tests. r=terrence
js/src/jit-test/jit_test.py
js/src/tests/lib/jittests.py
--- a/js/src/jit-test/jit_test.py
+++ b/js/src/jit-test/jit_test.py
@@ -90,16 +90,18 @@ def main(argv):
                   choices=['automation', 'none'],
                   help='Output format. Either automation or none'
                   ' (default %default).')
     op.add_option('--args', dest='shell_args', default='',
                   help='extra args to pass to the JS shell')
     op.add_option('-w', '--write-failures', dest='write_failures',
                   metavar='FILE',
                   help='Write a list of failed tests to [FILE]')
+    op.add_option('-C', '--check-output', action='store_true', dest='check_output',
+                  help='Run tests to check output for different jit-flags')
     op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                   help='Run test files listed in [FILE]')
     op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                   help='Retest using test list file [FILE]')
     op.add_option('-g', '--debug', action='store_const', const='gdb', dest='debugger',
                   help='Run a single test under the gdb debugger')
     op.add_option('-G', '--debug-rr', action='store_const', const='rr', dest='debugger',
                   help='Run a single test under the rr debugger')
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -492,16 +492,17 @@ def create_progressbar(num_tests, option
         ]
         return ProgressBar(num_tests, fmt)
     return NullProgressBar()
 
 def process_test_results(results, num_tests, pb, options):
     failures = []
     timeouts = 0
     complete = False
+    output_dict = {}
     doing = 'before starting'
 
     if num_tests == 0:
         pb.finish(True)
         complete = True
         return print_test_summary(num_tests, failures, complete, doing, options)
 
     try:
@@ -519,16 +520,23 @@ def process_test_results(results, num_te
                 sys.stdout.write(res.out)
                 sys.stdout.write(res.err)
                 sys.stdout.write('Exit code: {}\n'.format(res.rc))
 
             if res.test.valgrind and not show_output:
                 pb.beginline()
                 sys.stdout.write(res.err)
 
+            if options.check_output:
+                if res.test.path in output_dict.keys():
+                    if output_dict[res.test.path] != res.out:
+                        pb.message("FAIL - OUTPUT DIFFERS {}".format(res.test.relpath_tests))
+                else:
+                    output_dict[res.test.path] = res.out
+
             doing = 'after {}'.format(res.test.relpath_tests)
             if not ok:
                 failures.append(res)
                 if res.timed_out:
                     pb.message("TIMEOUT - {}".format(res.test.relpath_tests))
                     timeouts += 1
                 else:
                     pb.message("FAIL - {}".format(res.test.relpath_tests))