Back out b17c8b926585, seems to not work any more. :-\
author      Jeff Walden <jwalden@mit.edu>
date        Tue, 27 Jul 2010 18:23:30 -0700
changeset   48608  84b1306e4431ba6872bde0b275e1cfe515efdbf8
parent      48607  0e1199f92cdf866c6d3fc6dedcf6ea23fa4924d6
child       48609  77ebf0c91e8da80654ef3555292c387c53996a5f
push id     14748
push user   rsayre@mozilla.com
push date   Sun, 01 Aug 2010 00:33:23 +0000
treeherder  mozilla-central@f0df797bb2a9
milestone   2.0b3pre
backs out   b17c8b9265858f3224f43c35df6b23d8bb94a490
js/src/tests/ecma/Date/jstests.list
js/src/tests/jstests.py
js/src/tests/manifest.py
js/src/tests/tests.py
layout/tools/reftest/README.txt
layout/tools/reftest/reftest-cmdline.js
layout/tools/reftest/reftest.js
layout/tools/reftest/runreftest.py
--- a/js/src/tests/ecma/Date/jstests.list
+++ b/js/src/tests/ecma/Date/jstests.list
@@ -144,16 +144,16 @@ script 15.9.5.37-5.js
 script 15.9.5.4-1.js
 script 15.9.5.4-2-n.js
 script 15.9.5.5.js
 script 15.9.5.6.js
 script 15.9.5.7.js
 script 15.9.5.8.js
 script 15.9.5.9.js
 script 15.9.5.js
-slow script dst-offset-caching-1-of-8.js
-slow script dst-offset-caching-2-of-8.js
-slow script dst-offset-caching-3-of-8.js
-slow script dst-offset-caching-4-of-8.js
-slow script dst-offset-caching-5-of-8.js
-slow script dst-offset-caching-6-of-8.js
-slow script dst-offset-caching-7-of-8.js
-slow script dst-offset-caching-8-of-8.js
+script dst-offset-caching-1-of-8.js
+script dst-offset-caching-2-of-8.js
+script dst-offset-caching-3-of-8.js
+script dst-offset-caching-4-of-8.js
+script dst-offset-caching-5-of-8.js
+script dst-offset-caching-6-of-8.js
+script dst-offset-caching-7-of-8.js
+script dst-offset-caching-8-of-8.js
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -222,18 +222,16 @@ if __name__ == '__main__':
     op.add_option('--valgrind', dest='valgrind', action='store_true',
                   help='run tests in valgrind')
     op.add_option('--valgrind-args', dest='valgrind_args',
                   help='extra args to pass to valgrind')
     op.add_option('-c', '--check-manifest', dest='check_manifest', action='store_true',
                   help='check for test files not listed in the manifest')
     op.add_option('--failure-file', dest='failure_file',
                   help='write tests that have not passed to the given file')
-    op.add_option('--run-slow-tests', dest='run_slow_tests', action='store_true',
-                  help='run particularly slow tests as well as average-speed tests')
     (OPTIONS, args) = op.parse_args()
     if len(args) < 1:
         if not OPTIONS.check_manifest:
             op.error('missing JS_SHELL argument')
         JS, args = None, []
     else:
         JS, args = args[0], args[1:]
     # Convert to an absolute path so we can run JS from a different directory.
@@ -307,19 +305,16 @@ if __name__ == '__main__':
 
     if not OPTIONS.random:
         test_list = [ _ for _ in test_list if not _.random ]
 
     if OPTIONS.run_only_skipped:
         OPTIONS.run_skipped = True
         test_list = [ _ for _ in test_list if not _.enable ]
 
-    if not OPTIONS.run_slow_tests:
-        test_list = [ _ for _ in test_list if not _.slow ]
-
     if OPTIONS.debug and test_list:
         if len(test_list) > 1:
             print('Multiple tests match command line arguments, debugger can only run one')
             for tc in test_list:
                 print('    %s'%tc.path)
             sys.exit(2)
 
         cmd = test_list[0].get_command(TestTask.js_cmd_prefix)
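
In condensed form, the jstests.py plumbing removed above was an optparse flag plus a post-parse filter over the test list. A standalone Python sketch of that removed pattern (the TestCase stub and the test names are illustrative; the real class lives in tests.py):

    from optparse import OptionParser

    class TestCase(object):
        """Illustrative stand-in for the TestCase class defined in tests.py."""
        def __init__(self, path, slow=False):
            self.path = path
            self.slow = slow

    op = OptionParser()
    op.add_option('--run-slow-tests', dest='run_slow_tests', action='store_true',
                  help='run particularly slow tests as well as average-speed tests')
    (OPTIONS, args) = op.parse_args([])  # no flag given in this example

    test_list = [TestCase('15.9.5.4-1.js'),
                 TestCase('dst-offset-caching-1-of-8.js', slow=True)]

    # Without --run-slow-tests, tests marked slow are dropped before the run.
    if not OPTIONS.run_slow_tests:
        test_list = [t for t in test_list if not t.slow]

    assert [t.path for t in test_list] == ['15.9.5.4-1.js']
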
--- a/js/src/tests/manifest.py
+++ b/js/src/tests/manifest.py
@@ -105,17 +105,16 @@ def parse(filename, xul_tester, reldir =
         elif parts[0] == 'url-prefix':
             # Doesn't apply to shell tests
             pass
         else:
             script = None
             enable = True
             expect = True
             random = False
-            slow = False
 
             pos = 0
             while pos < len(parts):
                 if parts[pos] == 'fails':
                     expect = False
                     pos += 1
                 elif parts[pos] == 'skip':
                     expect = enable = False
@@ -140,19 +139,16 @@ def parse(filename, xul_tester, reldir =
                 elif parts[pos].startswith('random-if'):
                     cond = parts[pos][len('random-if('):-1]
                     if xul_tester.test(cond):
                         random = True
                     pos += 1
                 elif parts[pos] == 'script':
                     script = parts[pos+1]
                     pos += 2
-                elif parts[pos] == 'slow':
-                    slow = True
-                    pos += 1
                 else:
                     print 'warning: invalid manifest line element "%s"'%parts[pos]
                     pos += 1
 
             assert script is not None
             ans.append(TestCase(os.path.join(reldir, script), 
-                                enable, expect, random, slow))
+                                enable, expect, random))
     return ans
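
For reference, the loop shown above consumes whitespace-separated modifier keywords (fails, skip, random-if(cond), and, before this backout, slow) until it reaches the script entry. A simplified standalone sketch of that consumption pattern as it stands after the backout (the xul_tester condition check is stubbed with a callable; this is illustrative, not the real parse()):

    def parse_line(parts, test_cond=lambda cond: False):
        """Consume modifier keywords from one manifest line until 'script <name>'."""
        enable = expect = True
        random = False
        script = None
        pos = 0
        while pos < len(parts):
            if parts[pos] == 'fails':
                expect = False
                pos += 1
            elif parts[pos] == 'skip':
                expect = enable = False
                pos += 1
            elif parts[pos].startswith('random-if('):
                cond = parts[pos][len('random-if('):-1]
                if test_cond(cond):
                    random = True
                pos += 1
            elif parts[pos] == 'script':
                script = parts[pos + 1]
                pos += 2
            else:
                pos += 1  # the real parser warns about unknown keywords
        assert script is not None
        return script, enable, expect, random

    # e.g. parse_line('fails script 15.9.5.4-2-n.js'.split())
    #      -> ('15.9.5.4-2-n.js', True, False, False)
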
--- a/js/src/tests/tests.py
+++ b/js/src/tests/tests.py
@@ -82,33 +82,30 @@ class Test(object):
     def run(self, js_cmd_prefix, timeout=30.0):
         cmd = self.get_command(js_cmd_prefix)
         out, err, rc, dt = run_cmd(cmd, timeout)
         return TestOutput(self, cmd, out, err, rc, dt);
 
 class TestCase(Test):
     """A test case consisting of a test and an expected result."""
 
-    def __init__(self, path, enable, expect, random, slow):
+    def __init__(self, path, enable, expect, random):
         Test.__init__(self, path)
         self.enable = enable     # bool: True => run test, False => don't run
         self.expect = expect     # bool: expected result, True => pass
         self.random = random     # bool: True => ignore output as 'random'
-        self.slow = slow         # bool: True => test may run slowly
 
     def __str__(self):
         ans = self.path
         if not self.enable:
             ans += ', skip'
         if not self.expect:
             ans += ', fails'
         if self.random:
             ans += ', random'
-        if self.slow:
-            ans += ', slow'
         return ans
 
 class TestOutput:
     """Output from a test run."""
     def __init__(self, test, cmd, out, err, rc, dt):
         self.test = test   # Test
         self.cmd = cmd     # str:   command line of test
         self.out = out     # str:   stdout
--- a/layout/tools/reftest/README.txt
+++ b/layout/tools/reftest/README.txt
@@ -69,27 +69,16 @@ 2. A test item
             'skip' is preferred to simply commenting out the test because we
             want to report the test failure at the end of the test run.
 
       skip-if(condition) If the condition is met, the test is not run. This is
                          useful if, for example, the test crashes only on a
                          particular platform (i.e. it allows us to get test
                          coverage on the other platforms).
 
-      slow  The test may take a long time to run, so run it if slow tests are
-            either enabled or not disabled (test manifest interpreters may
-            choose whether or not to run such tests by default).
-
-      slow-if(condition) If the condition is met, the test is treated as if
-                         'slow' had been specified.  This is useful for tests
-                         which are slow only on particular platforms (e.g. a
-                         test which exercised out-of-memory behavior might be
-                         fast on a 32-bit system but inordinately slow on a
-                         64-bit system).
-
       asserts(count)
           Loading the test and reference is known to assert exactly
           count times.
           NOTE: An asserts() notation with a non-zero count or maxCount
           suppresses use of a cached canvas for the test with the
           annotation.  However, if later occurrences of the same test
           are not annotated, they will use the cached canvas
           (potentially from the load that asserted).  This allows
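
The README text above documents annotation semantics only; the actual interpreter is reftest.js. Purely as an illustration of the documented skip-if(condition) rule, a hypothetical helper (not part of the harness) could read:

    import re

    def should_skip(annotation, evaluate_condition):
        """True when a skip-if(condition) annotation says not to run the test.

        evaluate_condition is a placeholder for however the harness evaluates
        the condition expression against the current platform.
        """
        m = re.match(r'skip-if\((.*)\)$', annotation)
        return bool(m) and bool(evaluate_condition(m.group(1)))

    # The test is skipped only when the condition is met:
    assert should_skip('skip-if(winWidget)', lambda cond: True) is True
    assert should_skip('skip-if(winWidget)', lambda cond: False) is False
    assert should_skip('asserts(2)', lambda cond: True) is False
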
--- a/layout/tools/reftest/reftest-cmdline.js
+++ b/layout/tools/reftest/reftest-cmdline.js
@@ -74,23 +74,16 @@ RefTestCmdLineHandler.prototype =
 
     try {
       var nocache = cmdLine.handleFlag("reftestnocache", false);
       args.nocache = nocache;
     }
     catch (e) {
     }
 
-    try {
-      var skipslowtests = cmdLine.handleFlag("reftestskipslowtests", false);
-      args.skipslowtests = skipslowtests;
-    }
-    catch (e) {
-    }
-
     /* Ignore the platform's online/offline status while running reftests. */
     var ios = Components.classes["@mozilla.org/network/io-service;1"]
               .getService(Components.interfaces.nsIIOService2);
     ios.manageOfflineStatus = false;
     ios.offline = false;
 
     /* Force sRGB as an output profile for color management before we load a
        window. */
--- a/layout/tools/reftest/reftest.js
+++ b/layout/tools/reftest/reftest.js
@@ -85,17 +85,16 @@ var gTestResults = {
   UnexpectedPass: 0,
   AssertionUnexpected: 0,
   AssertionUnexpectedFixed: 0,
   // Known problems...
   KnownFail : 0,
   AssertionKnown: 0,
   Random : 0,
   Skip: 0,
-  Slow: 0,
 };
 var gTotalTests = 0;
 var gState;
 var gCurrentURL;
 var gFailureTimeout = null;
 var gFailureReason;
 var gServer;
 var gCount = 0;
@@ -123,19 +122,16 @@ const EXPECTED_FAIL = 1;
 const EXPECTED_RANDOM = 2;
 const EXPECTED_DEATH = 3;  // test must be skipped to avoid e.g. crash/hang
 
 const gProtocolRE = /^\w+:/;
 
 var HTTP_SERVER_PORT = 4444;
 const HTTP_SERVER_PORTS_TO_TRY = 50;
 
-// whether to run slow tests or not
-var gRunSlowTests = true;
-
 // whether we should skip caching canvases
 var gNoCanvasCache = false;
 
 var gRecycledCanvases = new Array();
 
 // By default we just log to stdout
 var gDumpLog = dump;
 
@@ -258,19 +254,16 @@ function StartTests()
 {
     try {
         // Need to read the manifest once we have the final HTTP_SERVER_PORT.
         var args = window.arguments[0].wrappedJSObject;
 
         if ("nocache" in args && args["nocache"])
             gNoCanvasCache = true;
 
-        if ("skipslowtests" in args && args.skipslowtests)
-            gRunSlowTests = false;
-
         ReadTopManifest(args.uri);
         BuildUseCounts();
 
         if (gTotalChunks > 0 && gThisChunk > 0) {
           var testsPerChunk = gURLs.length / gTotalChunks;
           var start = Math.round((gThisChunk-1) * testsPerChunk);
           var end = Math.round(gThisChunk * testsPerChunk);
           gURLs = gURLs.slice(start, end);
@@ -662,29 +655,20 @@ function ServeFiles(manifestURL, depth, 
     }
 
     return files.map(FileToURI);
 }
 
 function StartCurrentTest()
 {
     // make sure we don't run tests that are expected to kill the browser
-    while (gURLs.length > 0) {
-        var test = gURLs[0];
-        if (test.expected == EXPECTED_DEATH) {
-            ++gTestResults.Skip;
-            gDumpLog("REFTEST TEST-KNOWN-FAIL | " + test.url1.spec + " | (SKIP)\n");
-            gURLs.shift();
-        } else if (test.slow && !gRunSlowTests) {
-            ++gTestResults.Slow;
-            gDumpLog("REFTEST TEST-KNOWN-SLOW | " + test.url1.spec + " | (SLOW)\n");
-            gURLs.shift();
-        } else {
-            break;
-        }
+    while (gURLs.length > 0 && gURLs[0].expected == EXPECTED_DEATH) {
+        ++gTestResults.Skip;
+        gDumpLog("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].url1.spec + " | (SKIP)\n");
+        gURLs.shift();
     }
 
     if (gURLs.length == 0) {
         DoneTests();
     }
     else {
         var currentTest = gTotalTests - gURLs.length;
         document.title = "reftest: " + currentTest + " / " + gTotalTests +
@@ -737,23 +721,22 @@ function DoneTests()
     gDumpLog("REFTEST INFO | Unexpected: " + count + " (" +
          gTestResults.UnexpectedFail + " unexpected fail, " +
          gTestResults.UnexpectedPass + " unexpected pass, " +
          gTestResults.AssertionUnexpected + " unexpected asserts, " +
          gTestResults.AssertionUnexpectedFixed + " unexpected fixed asserts, " +
          gTestResults.FailedLoad + " failed load, " +
          gTestResults.Exception + " exception)\n");
     count = gTestResults.KnownFail + gTestResults.AssertionKnown +
-            gTestResults.Random + gTestResults.Skip + gTestResults.Slow;
-    dump("REFTEST INFO | Known problems: " + count + " (" +
+            gTestResults.Random + gTestResults.Skip;
+    gDumpLog("REFTEST INFO | Known problems: " + count + " (" +
          gTestResults.KnownFail + " known fail, " +
          gTestResults.AssertionKnown + " known asserts, " +
          gTestResults.Random + " random, " +
-         gTestResults.Skip + " skipped, " +
-         gTestResults.Slow + " slow)\n");
+         gTestResults.Skip + " skipped)\n");
 
     gDumpLog("REFTEST INFO | Total canvas count = " + gRecycledCanvases.length + "\n");
 
     gDumpLog("REFTEST TEST-START | Shutdown\n");
     function onStopped() {
         goQuitApplication();
     }
     if (gServer)
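
As an aside on the chunking arithmetic visible in the StartTests context above: each chunk's end index and the next chunk's start index are computed from the same rounded expression, so the slices partition the test list with no gaps or overlap. A quick Python check of that property, using arbitrary example sizes:

    def chunk_bounds(total_tests, total_chunks, this_chunk):
        """Slice bounds as computed in StartTests (this_chunk is 1-based).

        Note: Math.round in JS rounds .5 up while Python's round may round
        .5 to even, so exact boundaries can differ at ties; the partition
        property checked below holds either way.
        """
        per_chunk = float(total_tests) / total_chunks
        start = int(round((this_chunk - 1) * per_chunk))
        end = int(round(this_chunk * per_chunk))
        return start, end

    # Each chunk ends exactly where the next begins, so every test runs once.
    total, chunks = 1037, 4
    bounds = [chunk_bounds(total, chunks, c) for c in range(1, chunks + 1)]
    assert bounds[0][0] == 0 and bounds[-1][1] == total
    assert all(bounds[i][1] == bounds[i + 1][0] for i in range(chunks - 1))
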
--- a/layout/tools/reftest/runreftest.py
+++ b/layout/tools/reftest/runreftest.py
@@ -224,21 +224,16 @@ class ReftestOptions(OptionParser):
     defaults["thisChunk"] = None
 
     self.add_option("--log-file",
                     action = "store", type = "string", dest = "logFile",
                     default = None,
                     help = "file to log output to in addition to stdout")
     defaults["logFile"] = None
  
-    self.add_option("--skip-slow-tests",
-                    dest = "skipSlowTests", action = "store_true",
-                    help = "skip tests marked as slow when running")
-    defaults["skipSlowTests"] = False
-
     self.set_defaults(**defaults)
 
 def main():
   automation = Automation()
   parser = ReftestOptions(automation)
   reftest = RefTest(automation)
 
   options, args = parser.parse_args()