Bug 1014125 - Bisection Base Patch. r=ahal
authorVaibhav Agrawal <vaibhavmagarwal@gmail.com>
Fri, 04 Jul 2014 13:55:00 +0200
changeset 192525 c3c48c1861b1562594d2cc209b50e6545c77d80d
parent 192524 37cfa5ad20646e1e893d45ba8757d0d00818aabe
child 192526 e65266c480e43171c997e89847114bc2a12341d5
push id45876
push usercbook@mozilla.com
push dateMon, 07 Jul 2014 11:48:37 +0000
treeherdermozilla-inbound@c3c48c1861b1 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersahal
bugs1014125
milestone33.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1014125 - Bisection Base Patch. r=ahal
build/automation.py.in
testing/mochitest/Makefile.in
testing/mochitest/bisection.py
testing/mochitest/mach_commands.py
testing/mochitest/mochitest_options.py
testing/mochitest/runtests.py
testing/mochitest/runtestsb2g.py
testing/mochitest/runtestsremote.py
--- a/build/automation.py.in
+++ b/build/automation.py.in
@@ -814,17 +814,17 @@ class Automation(object):
   def checkForCrashes(self, minidumpDir, symbolsPath):
     return mozcrash.check_for_crashes(minidumpDir, symbolsPath, test_name=self.lastTestSeen)
 
   def runApp(self, testURL, env, app, profileDir, extraArgs,
              runSSLTunnel = False, utilityPath = None,
              xrePath = None, certPath = None,
              debuggerInfo = None, symbolsPath = None,
              timeout = -1, maxTime = None, onLaunch = None,
-             webapprtChrome = False, screenshotOnFail=False, testPath=None):
+             webapprtChrome = False, screenshotOnFail=False, testPath=None, bisectChunk=None):
     """
     Run the app, log the duration it took to execute, return the status code.
     Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.
     """
 
     if utilityPath == None:
       utilityPath = self.DIST_BIN
     if xrePath == None:
--- a/testing/mochitest/Makefile.in
+++ b/testing/mochitest/Makefile.in
@@ -8,16 +8,17 @@ MOZ_CHROME_FILE_FORMAT = jar
 DIST_FILES = install.rdf
 
 # Used in install.rdf
 USE_EXTENSION_MANIFEST = 1
 
 # files that get copied into $objdir/_tests/
 SERV_FILES = 	\
 		runtests.py \
+		bisection.py \
 		automation.py \
 		runtestsb2g.py \
 		runtestsremote.py \
 		runtestsvmware.py \
 		mochitest_options.py \
 		manifest.webapp \
 		$(topsrcdir)/testing/mozbase/mozdevice/mozdevice/devicemanager.py \
 		$(topsrcdir)/testing/mozbase/mozdevice/mozdevice/devicemanagerADB.py \
new file mode 100644
--- /dev/null
+++ b/testing/mochitest/bisection.py
@@ -0,0 +1,210 @@
+import os
+import math
+import mozinfo
+
+class Bisect(object):
+    "Class implementing test bisection and result summarization for the --bisect-chunk option."
+
+    def __init__(self, harness):
+        super(Bisect, self).__init__()
+        self.summary = []
+        self.contents = {}
+        self.testRoot = harness.testRoot
+        self.testRootAbs = harness.testRootAbs
+
+    def setup(self, tests):
+        "This method is used to initialize various variables that are required for test bisection"
+        status = 0
+        self.contents.clear()
+        # We need totalTests key in contents for sanity check
+        self.contents['totalTests'] = tests
+        self.contents['tests'] = tests
+        self.contents['loop'] = 0
+        return status
+
+    def reset(self, expectedError, result):
+        "This method is used to initialize self.expectedError and self.result for each loop in runtests."
+        self.expectedError = expectedError
+        self.result = result
+
+    def get_test_chunk(self, options, tests):
+        "This method is used to return the chunk of tests that are to be run"
+        if not options.totalChunks or not options.thisChunk:
+            return tests
+
+        # The logic here is the same as in chunkifyTests.js; we need this for bisecting tests.
+        if options.chunkByDir:
+            tests_by_dir = {}
+            test_dirs = []
+            for test in tests:
+                directory = test.split("/")
+                directory = directory[0:min(options.chunkByDir, len(directory)-1)]
+                directory = "/".join(directory)
+
+                if not directory in tests_by_dir:
+                    tests_by_dir[directory] = [test]
+                    test_dirs.append(directory)
+                else:
+                    tests_by_dir[directory].append(test)
+
+            tests_per_chunk = float(len(test_dirs)) / options.totalChunks
+            start = int(round((options.thisChunk-1) * tests_per_chunk))
+            end = int(round((options.thisChunk) * tests_per_chunk))
+            test_dirs = test_dirs[start:end]
+            return_tests = []
+            for directory in test_dirs:
+                return_tests += tests_by_dir[directory]
+
+        else:
+            tests_per_chunk = float(len(tests)) / options.totalChunks
+            start = int(round((options.thisChunk-1) * tests_per_chunk))
+            end = int(round(options.thisChunk * tests_per_chunk))
+            return_tests = tests[start:end]
+
+        options.totalChunks = None
+        options.thisChunk = None
+        options.chunkByDir = None
+        return return_tests
+
+    def get_tests_for_bisection(self, options, tests):
+        "Make a list of tests for bisection from a given list of tests"
+        tests = self.get_test_chunk(options, tests)
+        bisectlist = []
+        for test in tests:
+            bisectlist.append(test)
+            if test.endswith(options.bisectChunk):
+                break
+
+        return bisectlist
+
+    def pre_test(self, options, tests, status):
+        "This method is used to call other methods for setting up variables and getting the list of tests for bisection."
+        if options.bisectChunk == "default":
+            return tests
+        # The second condition in 'if' is required to verify that the failing test is the last one.
+        elif 'loop' not in self.contents or not self.contents['tests'][-1].endswith(options.bisectChunk):
+            tests = self.get_tests_for_bisection(options, tests)
+            status = self.setup(tests)
+
+        return self.next_chunk_reverse(options, status)
+
+    def post_test(self, options, expectedError, result):
+        "This method is used to call other methods to summarize results and check whether a sanity check is done or not."
+        self.reset(expectedError, result)
+        status = self.summarize_chunk(options)
+        # Check whether sanity check has to be done. Also it is necessary to check whether options.bisectChunk is present
+        # in self.expectedError as we do not want to run if it is "default".
+        if status == -1 and options.bisectChunk in self.expectedError:
+            # In case we have a debug build, we don't want to run a sanity check, will take too much time.
+            if mozinfo.info['debug']:
+                return status
+
+            testBleedThrough = self.contents['testsToRun'][0]
+            tests = self.contents['totalTests']
+            tests.remove(testBleedThrough)
+            # To make sure that the failing test is dependent on some other test.
+            if options.bisectChunk in testBleedThrough:
+                return status
+
+            status = self.setup(tests)
+            self.summary.append("Sanity Check:")
+
+        return status
+
+    def next_chunk_reverse(self, options, status):
+        "This method is used to bisect the tests in a reverse search fashion."
+
+        # Base Cases.
+        if self.contents['loop'] == 0:
+            self.contents['loop'] += 1
+            self.contents['testsToRun'] = self.contents['tests']
+            return self.contents['testsToRun']
+        if self.contents['loop'] == 1:
+            self.contents['loop'] += 1
+            self.contents['testsToRun'] = [self.contents['tests'][-1]]
+            return self.contents['testsToRun']
+
+        if 'result' in self.contents:
+            if self.contents['result'] == "PASS":
+                chunkSize = self.contents['end'] - self.contents['start']
+                self.contents['end'] = self.contents['start'] - 1
+                self.contents['start'] = self.contents['end'] - chunkSize
+
+        # self.contents['result'] will be expected error only if it fails.
+            elif self.contents['result'] == "FAIL":
+                self.contents['tests'] = self.contents['testsToRun']
+                status = 1 # for initializing
+
+        # initialize
+        if status:
+            totalTests = len(self.contents['tests'])
+            chunkSize = int(math.ceil(totalTests / 10.0))
+            self.contents['start'] = totalTests - chunkSize - 1
+            self.contents['end'] = totalTests - 2
+
+        start = self.contents['start']
+        end = self.contents['end'] + 1
+        self.contents['testsToRun'] = self.contents['tests'][start:end]
+        self.contents['testsToRun'].append(self.contents['tests'][-1])
+        self.contents['loop'] += 1
+
+        return self.contents['testsToRun']
+
+    def summarize_chunk(self, options):
+        "This method is used to summarize the results after the list of tests is run."
+        if options.bisectChunk == "default":
+            # if no expectedError that means all the tests have successfully passed.
+            if len(self.expectedError) == 0:
+                return -1
+            options.bisectChunk = self.expectedError.keys()[0]
+            self.summary.append("\tFound Error in test: %s" % options.bisectChunk)
+            return 0
+
+        # If options.bisectChunk is not in self.result then we need to move to the next run.
+        if options.bisectChunk not in self.result:
+            return -1
+
+        self.summary.append("\tPass %d:" % self.contents['loop'])
+        if len(self.contents['testsToRun']) > 1:
+            self.summary.append("\t\t%d test files(start,end,failing). [%s, %s, %s]" % (len(self.contents['testsToRun']), self.contents['testsToRun'][0], self.contents['testsToRun'][-2], self.contents['testsToRun'][-1]))
+        else:
+            self.summary.append("\t\t1 test file [%s]" % self.contents['testsToRun'][0])
+
+        if self.result[options.bisectChunk] == "PASS":
+            self.summary.append("\t\tno failures found.")
+            if self.contents['loop'] == 1:
+                status = -1
+            elif self.contents['loop'] == 2:
+                status = 1
+            else:
+                self.contents['result'] = "PASS"
+                status = 0
+
+        elif self.result[options.bisectChunk] == "FAIL":
+            if 'expectedError' not in self.contents:
+                self.summary.append("\t\t%s failed." % self.contents['testsToRun'][-1])
+                self.contents['expectedError'] = self.expectedError[options.bisectChunk]
+                status = 0
+
+            elif self.expectedError[options.bisectChunk] == self.contents['expectedError']:
+                self.summary.append("\t\t%s failed with expected error." % self.contents['testsToRun'][-1])
+                self.contents['result'] = "FAIL"
+                status = 0
+
+                # This code checks for test-bleedthrough. Should work for any algorithm.
+                numberOfTests = len(self.contents['testsToRun'])
+                if numberOfTests < 3:
+                    # Only 2 tests were run; since the last one is the failing test itself, the bleed-through test must be the first test.
+                    self.summary.append("TEST-BLEEDTHROUGH - found failure, %s" % self.contents['testsToRun'][0])
+                    status = -1
+            else:
+                self.summary.append("\t\t%s failed with different error." % self.contents['testsToRun'][-1])
+                status = -1
+
+        return status
+
+    def print_summary(self):
+        "This method is used to print the recorded summary."
+        print "Bisection summary:"
+        for line in self.summary:
+            print line
--- a/testing/mochitest/mach_commands.py
+++ b/testing/mochitest/mach_commands.py
@@ -193,17 +193,17 @@ class MochitestRunner(MozbuildObject):
 
     def run_desktop_test(self, context, suite=None, test_paths=None, debugger=None,
         debugger_args=None, slowscript=False, screenshot_on_fail = False, shuffle=False, keep_open=False,
         rerun_failures=False, no_autorun=False, repeat=0, run_until_failure=False,
         slow=False, chunk_by_dir=0, total_chunks=None, this_chunk=None,
         jsdebugger=False, debug_on_failure=False, start_at=None, end_at=None,
         e10s=False, dmd=False, dump_output_directory=None,
         dump_about_memory_after_test=False, dump_dmd_after_test=False,
-        install_extension=None, quiet=False, environment=[], app_override=None, runByDir=False,
+        install_extension=None, quiet=False, environment=[], app_override=None, bisectChunk=None, runByDir=False,
         useTestMediaDevices=False, **kwargs):
         """Runs a mochitest.
 
         test_paths are path to tests. They can be a relative path from the
         top source directory, an absolute filename, or a directory containing
         test files.
 
         suite is the type of mochitest to run. It can be one of ('plain',
@@ -319,16 +319,17 @@ class MochitestRunner(MozbuildObject):
         options.startAt = start_at
         options.endAt = end_at
         options.e10s = e10s
         options.dumpAboutMemoryAfterTest = dump_about_memory_after_test
         options.dumpDMDAfterTest = dump_dmd_after_test
         options.dumpOutputDirectory = dump_output_directory
         options.quiet = quiet
         options.environment = environment
+        options.bisectChunk = bisectChunk
         options.runByDir = runByDir
         options.useTestMediaDevices = useTestMediaDevices
 
         options.failureFile = failure_file_path
         if install_extension != None:
             options.extensionsToInstall = [os.path.join(self.topsrcdir,install_extension)]
 
         for k, v in kwargs.iteritems():
@@ -536,16 +537,21 @@ def MochitestCommand(func):
     func = setenv(func)
 
     runbydir = CommandArgument('--run-by-dir', default=False,
                                  action='store_true',
                                  dest='runByDir',
         help='Run each directory in a single browser instance with a fresh profile.')
     func = runbydir(func)
 
+    bisect_chunk = CommandArgument('--bisect-chunk', type=str,
+                                 dest='bisectChunk',
+        help='Specify the failing test name to find the previous tests that may be causing the failure.')
+    func = bisect_chunk(func)
+
     test_media = CommandArgument('--use-test-media-devices', default=False,
                                  action='store_true',
                                  dest='useTestMediaDevices',
         help='Use test media device drivers for media testing.')
     func = test_media(func)
 
     app_override = CommandArgument('--app-override', default=None, action='store',
         help="Override the default binary used to run tests with the path you provide, e.g. " \
--- a/testing/mochitest/mochitest_options.py
+++ b/testing/mochitest/mochitest_options.py
@@ -146,16 +146,23 @@ class MochitestOptions(optparse.OptionPa
         }],
         [["--test-path"],
         { "action": "store",
           "type": "string",
           "dest": "testPath",
           "help": "start in the given directory's tests",
           "default": "",
         }],
+        [["--bisect-chunk"],
+        { "action": "store",
+          "type": "string",
+          "dest": "bisectChunk",
+          "help": "Specify the failing test name to find the previous tests that may be causing the failure.",
+          "default": None,
+        }],
         [["--start-at"],
         { "action": "store",
           "type": "string",
           "dest": "startAt",
           "help": "skip over tests until reaching the given test",
           "default": "",
         }],
         [["--end-at"],
--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -24,16 +24,17 @@ import re
 import shutil
 import signal
 import subprocess
 import tempfile
 import time
 import traceback
 import urllib2
 import zipfile
+import bisection
 
 from automationutils import environment, getDebuggerInfo, isURL, KeyValueParseError, parseKeyValue, processLeakLog, dumpScreen, ShutdownLeaks, printstatus, LSANLeaks
 from datetime import datetime
 from manifestparser import TestManifest
 from mochitest_options import MochitestOptions
 from mozprofile import Profile, Preferences
 from mozprofile.permissions import ServerLocations
 from urllib import quote_plus as encodeURIComponent
@@ -448,82 +449,37 @@ class MochitestUtilsMixin(object):
     if os.path.isfile(os.path.join(self.oldcwd, os.path.dirname(__file__), self.TEST_PATH, testPath)) and options.repeat > 0:
       testURL = "/".join([testHost, self.TEST_PATH, os.path.dirname(testPath)])
     if options.chrome or options.a11y:
       testURL = "/".join([testHost, self.CHROME_PATH])
     elif options.browserChrome:
       testURL = "about:blank"
     return testURL
 
-  def buildTestPath(self, options, disabled=True):
+  def buildTestPath(self, options, testsToFilter=None, disabled=True):
     """ Build the url path to the specific test harness and test file or directory
         Build a manifest of tests to run and write out a json file for the harness to read
+        testsToFilter option is used to filter/keep the tests provided in the list
 
         disabled -- This allows to add all disabled tests on the build side
                     and then on the run side to only run the enabled ones
     """
-    self.setTestRoot(options)
-    manifest = self.getTestManifest(options)
 
-    if manifest:
-      # Python 2.6 doesn't allow unicode keys to be used for keyword
-      # arguments. This gross hack works around the problem until we
-      # rid ourselves of 2.6.
-      info = {}
-      for k, v in mozinfo.info.items():
-        if isinstance(k, unicode):
-          k = k.encode('ascii')
-        info[k] = v
-
-      # Bug 883858 - return all tests including disabled tests
-      testPath = self.getTestPath(options)
-      testPath = testPath.replace('\\', '/')
-      if testPath.endswith('.html') or \
-         testPath.endswith('.xhtml') or \
-         testPath.endswith('.xul') or \
-         testPath.endswith('.js'):
-          # In the case where we have a single file, we don't want to filter based on options such as subsuite.
-          tests = manifest.active_tests(disabled=disabled, options=None, **info)
-          for test in tests:
-              if 'disabled' in test:
-                  del test['disabled']
-      else:
-          tests = manifest.active_tests(disabled=disabled, options=options, **info)
-      paths = []
+    tests = self.getActiveTests(options, disabled)
+    paths = []
 
-      for test in tests:
-        pathAbs = os.path.abspath(test['path'])
-        assert pathAbs.startswith(self.testRootAbs)
-        tp = pathAbs[len(self.testRootAbs):].replace('\\', '/').strip('/')
-
-        # Filter out tests if we are using --test-path
-        if testPath and not tp.startswith(testPath):
-          continue
-
-        if not self.isTest(options, tp):
-          log.warning('Warning: %s from manifest %s is not a valid test' % (test['name'], test['manifest']))
-          continue
+    for test in tests:
+      if testsToFilter and (test['path'] not in testsToFilter):
+        continue
+      paths.append(test)
 
-        testob = {'path': tp}
-        if test.has_key('disabled'):
-          testob['disabled'] = test['disabled']
-        paths.append(testob)
-
-      # Sort tests so they are run in a deterministic order.
-      def path_sort(ob1, ob2):
-        path1 = ob1['path'].split('/')
-        path2 = ob2['path'].split('/')
-        return cmp(path1, path2)
-
-      paths.sort(path_sort)
-
-      # Bug 883865 - add this functionality into manifestparser
-      with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile:
-        manifestFile.write(json.dumps({'tests': paths}))
-      options.manifestFile = 'tests.json'
+    # Bug 883865 - add this functionality into manifestparser
+    with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile:
+      manifestFile.write(json.dumps({'tests': paths}))
+    options.manifestFile = 'tests.json'
 
     return self.buildTestURL(options)
 
   def startWebSocketServer(self, options, debuggerInfo):
     """ Launch the websocket server """
     self.wsserver = WebSocketServer(options, SCRIPT_DIR, debuggerInfo)
     self.wsserver.start()
 
@@ -893,16 +849,23 @@ class Mochitest(MochitestUtilsMixin):
     # (particularly after a build) takes forever.
     self.SERVER_STARTUP_TIMEOUT = 180 if mozinfo.info.get('debug') else 90
 
     # metro browser sub process id
     self.browserProcessId = None
 
 
     self.haveDumpedScreen = False
+    # Create variables to count the number of passes, fails, todos.
+    self.countpass = 0
+    self.countfail = 0
+    self.counttodo = 0
+
+    self.expectedError = {}
+    self.result = {}
 
   def extraPrefs(self, extraPrefs):
     """interpolate extra preferences from option strings"""
 
     try:
       return dict(parseKeyValue(extraPrefs, context='--setpref='))
     except KeyValueParseError, e:
       print str(e)
@@ -1073,16 +1036,17 @@ class Mochitest(MochitestUtilsMixin):
     del self.profile
     if options.pidFile != "":
       try:
         os.remove(options.pidFile)
         if os.path.exists(options.pidFile + ".xpcshell.pid"):
           os.remove(options.pidFile + ".xpcshell.pid")
       except:
         log.warn("cleaning up pidfile '%s' was unsuccessful from the test harness", options.pidFile)
+    options.manifestFile = None
 
   def dumpScreen(self, utilityPath):
     if self.haveDumpedScreen:
       log.info("Not taking screenshot here: see the one that was previously logged")
       return
     self.haveDumpedScreen = True
     dumpScreen(utilityPath)
 
@@ -1184,17 +1148,18 @@ class Mochitest(MochitestUtilsMixin):
              extraArgs,
              utilityPath,
              debuggerInfo=None,
              symbolsPath=None,
              timeout=-1,
              onLaunch=None,
              webapprtChrome=False,
              screenshotOnFail=False,
-             testPath=None):
+             testPath=None,
+             bisectChunk=None):
     """
     Run the app, log the duration it took to execute, return the status code.
     Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.
     """
 
     # debugger information
     interactive = False
     debug_args = None
@@ -1250,16 +1215,17 @@ class Mochitest(MochitestUtilsMixin):
       # create an instance to process the output
       outputHandler = self.OutputHandler(harness=self,
                                          utilityPath=utilityPath,
                                          symbolsPath=symbolsPath,
                                          dump_screen_on_timeout=not debuggerInfo,
                                          dump_screen_on_fail=screenshotOnFail,
                                          shutdownLeaks=shutdownLeaks,
                                          lsanLeaks=lsanLeaks,
+                                         bisectChunk=bisectChunk
         )
 
       def timeoutHandler():
         browserProcessId = outputHandler.browserProcessId
         self.handleTimeout(timeout, proc, utilityPath, debuggerInfo, browserProcessId, testPath)
       kp_kwargs = {'kill_on_timeout': False,
                    'cwd': SCRIPT_DIR,
                    'onTimeout': [timeoutHandler]}
@@ -1335,73 +1301,186 @@ class Mochitest(MochitestUtilsMixin):
 
     finally:
       # cleanup
       if os.path.exists(processLog):
         os.remove(processLog)
 
     return status
 
+  def initializeLooping(self, options):
+    """
+      This method is used to clear the contents before each run of for loop.
+      This method is used for --run-by-dir and --bisect-chunk.
+    """
+    self.expectedError.clear()
+    self.result.clear()
+    options.manifestFile = None
+    options.profilePath = None
+    self.urlOpts = []
+
+  def getActiveTests(self, options, disabled=True):
+    """
+      This method is used to parse the manifest and return active filtered tests.
+    """
+    self.setTestRoot(options)
+    manifest = self.getTestManifest(options)
+
+    if manifest:
+      # Python 2.6 doesn't allow unicode keys to be used for keyword
+      # arguments. This gross hack works around the problem until we
+      # rid ourselves of 2.6.
+      info = {}
+      for k, v in mozinfo.info.items():
+        if isinstance(k, unicode):
+          k = k.encode('ascii')
+        info[k] = v
+
+      # Bug 883858 - return all tests including disabled tests
+      testPath = self.getTestPath(options)
+      testPath = testPath.replace('\\', '/')
+      if testPath.endswith('.html') or \
+         testPath.endswith('.xhtml') or \
+         testPath.endswith('.xul') or \
+         testPath.endswith('.js'):
+          # In the case where we have a single file, we don't want to filter based on options such as subsuite.
+          tests = manifest.active_tests(disabled=disabled, options=None, **info)
+          for test in tests:
+              if 'disabled' in test:
+                  del test['disabled']
+      else:
+          tests = manifest.active_tests(disabled=disabled, options=options, **info)
+    paths = []
+
+    for test in tests:
+      pathAbs = os.path.abspath(test['path'])
+      assert pathAbs.startswith(self.testRootAbs)
+      tp = pathAbs[len(self.testRootAbs):].replace('\\', '/').strip('/')
+
+      # Filter out tests if we are using --test-path
+      if testPath and not tp.startswith(testPath):
+        continue
+
+      if not self.isTest(options, tp):
+        log.warning('Warning: %s from manifest %s is not a valid test' % (test['name'], test['manifest']))
+        continue
+
+      testob = {'path': tp}
+      if test.has_key('disabled'):
+        testob['disabled'] = test['disabled']
+      paths.append(testob)
+
+    def path_sort(ob1, ob2):
+        path1 = ob1['path'].split('/')
+        path2 = ob2['path'].split('/')
+        return cmp(path1, path2)
+
+    paths.sort(path_sort)
+
+    return paths
+
+  def getTestsToRun(self, options):
+    """
+      This method makes a list of tests that are to be run. Required mainly for --bisect-chunk.
+    """
+    tests = self.getActiveTests(options)
+    testsToRun = []
+    for test in tests:
+      if test.has_key('disabled'):
+        continue
+      testsToRun.append(test['path'])
+
+    return testsToRun
+
+  def runMochitests(self, options, onLaunch=None):
+    "This is a base method for calling other methods in this class for --bisect-chunk."
+    testsToRun = self.getTestsToRun(options)
+
+    # Making an instance of bisect class for --bisect-chunk option.
+    bisect = bisection.Bisect(self)
+    finished = False
+    status = 0
+    while not finished:
+      if options.bisectChunk:
+        testsToRun = bisect.pre_test(options, testsToRun, status)
+
+      self.doTests(options, onLaunch, testsToRun)
+      if options.bisectChunk:
+        status = bisect.post_test(options, self.expectedError, self.result)
+      else:
+        status = -1
+
+      if status == -1:
+        finished = True
+
+    # We need to print the summary only if options.bisectChunk has a value.
+    # Also we need to make sure that we do not print the summary in between running tests via --run-by-dir.
+    if options.bisectChunk and options.bisectChunk in self.result:
+      bisect.print_summary()
+      return -1
+
+    return 0
+
   def runTests(self, options, onLaunch=None):
     """ Prepare, configure, run tests and cleanup """
 
-    # Create variables to count the number of passes, fails, todos.
-    self.countpass = 0
-    self.countfail = 0
-    self.counttodo = 0
-
     self.setTestRoot(options)
 
     if not options.runByDir:
-      return self.doTests(options, onLaunch)
+      self.runMochitests(options, onLaunch)
+      return 0
 
+    # code for --run-by-dir
     dirs = self.getDirectories(options)
-    
+
     if options.totalChunks > 1:
       chunkSize = int(len(dirs) / options.totalChunks) + 1
       start = chunkSize * (options.thisChunk-1)
       end = chunkSize * (options.thisChunk)
       dirs = dirs[start:end]
 
     options.totalChunks = None
     options.thisChunk = None
     options.chunkByDir = 0
     inputTestPath = self.getTestPath(options)
     for dir in dirs:
-      options.manifestFile = None
-
       if inputTestPath and not inputTestPath.startswith(dir):
         continue
 
       options.testPath = dir
       print "testpath: %s" % options.testPath
 
       # If we are using --run-by-dir, we should not use the profile path (if) provided
       # by the user, since we need to create a new directory for each run. We would face problems
       # if we use the directory provided by the user.
-      options.profilePath = None
-      self.urlOpts = []
-      self.doTests(options, onLaunch)
+      runResult = self.runMochitests(options, onLaunch)
+      if runResult == -1:
+        return 0
 
     # printing total number of tests
     if options.browserChrome:
       print "TEST-INFO | checking window state"
       print "Browser Chrome Test Summary"
       print "\tPassed: %s" % self.countpass
       print "\tFailed: %s" % self.countfail
       print "\tTodo: %s" % self.counttodo
       print "*** End BrowserChrome Test Results ***"
     else:
       print "0 INFO TEST-START | Shutdown"
       print "1 INFO Passed:  %s" % self.countpass
       print "2 INFO Failed:  %s" % self.countfail
       print "3 INFO Todo:    %s" % self.counttodo
       print "4 INFO SimpleTest FINISHED"
 
-  def doTests(self, options, onLaunch=None):
+  def doTests(self, options, onLaunch=None, testsToFilter = None):
+    # A call to initializeLooping method is required in case of --run-by-dir or --bisect-chunk
+    # since we need to initialize variables for each loop.
+    if options.bisectChunk or options.runByDir:
+      self.initializeLooping(options)
+
     # get debugger info, a dict of:
     # {'path': path to the debugger (string),
     #  'interactive': whether the debugger is interactive or not (bool)
     #  'args': arguments to the debugger (list)
     # TODO: use mozrunner.local.debugger_arguments:
     # https://github.com/mozilla/mozbase/blob/master/mozrunner/mozrunner/local.py#L42
     debuggerInfo = getDebuggerInfo(self.oldcwd,
                                    options.debugger,
@@ -1426,17 +1505,18 @@ class Mochitest(MochitestUtilsMixin):
 
     self.browserEnv = self.buildBrowserEnv(options, debuggerInfo is not None)
     if self.browserEnv is None:
       return 1
 
     try:
       self.startServers(options, debuggerInfo)
 
-      testURL = self.buildTestPath(options)
+      # testsToFilter parameter is used to filter out the test list that is sent to buildTestPath
+      testURL = self.buildTestPath(options, testsToFilter)
 
       # read the number of tests here, if we are not going to run any, terminate early
       if os.path.exists(os.path.join(SCRIPT_DIR, 'tests.json')):
         with open(os.path.join(SCRIPT_DIR, 'tests.json')) as fHandle:
           tests = json.load(fHandle)
         count = 0
         for test in tests['tests']:
           count += 1
@@ -1483,17 +1563,18 @@ class Mochitest(MochitestUtilsMixin):
                              extraArgs=options.browserArgs,
                              utilityPath=options.utilityPath,
                              debuggerInfo=debuggerInfo,
                              symbolsPath=options.symbolsPath,
                              timeout=timeout,
                              onLaunch=onLaunch,
                              webapprtChrome=options.webapprtChrome,
                              screenshotOnFail=options.screenshotOnFail,
-                             testPath=options.testPath
+                             testPath=options.testPath,
+                             bisectChunk=options.bisectChunk
         )
       except KeyboardInterrupt:
         log.info("runtests.py | Received keyboard interrupt.\n");
         status = -1
       except:
         traceback.print_exc()
         log.error("Automation Error: Received unexpected exception while running application\n")
         status = 1
@@ -1527,44 +1608,48 @@ class Mochitest(MochitestUtilsMixin):
       log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output", self.lastTestSeen, int(timeout))
     browserProcessId = browserProcessId or proc.pid
     self.killAndGetStack(browserProcessId, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)
 
   ### output processing
 
   class OutputHandler(object):
     """line output handler for mozrunner"""
-    def __init__(self, harness, utilityPath, symbolsPath=None, dump_screen_on_timeout=True, dump_screen_on_fail=False, shutdownLeaks=None, lsanLeaks=None):
+    def __init__(self, harness, utilityPath, symbolsPath=None, dump_screen_on_timeout=True, dump_screen_on_fail=False, shutdownLeaks=None, lsanLeaks=None, bisectChunk=None):
       """
       harness -- harness instance
       dump_screen_on_timeout -- whether to dump the screen on timeout
       """
       self.harness = harness
       self.utilityPath = utilityPath
       self.symbolsPath = symbolsPath
       self.dump_screen_on_timeout = dump_screen_on_timeout
       self.dump_screen_on_fail = dump_screen_on_fail
       self.shutdownLeaks = shutdownLeaks
       self.lsanLeaks = lsanLeaks
+      # bisectChunk: value of --bisect-chunk (None when bisection is off).
+      # When set, processOutputLine additionally records per-test results
+      # via record_result/first_error.
+      self.bisectChunk = bisectChunk
 
       # perl binary to use
       self.perl = which('perl')
 
       # With metro browser runs this script launches the metro test harness which launches the browser.
       # The metro test harness hands back the real browser process id via log output which we need to
       # pick up on and parse out. This variable tracks the real browser process id if we find it.
       self.browserProcessId = None
 
       # stack fixer function and/or process
       self.stackFixerFunction, self.stackFixerProcess = self.stackFixer()
 
     def processOutputLine(self, line):
       """per line handler of output for mozprocess"""
       for handler in self.outputHandlers():
         line = handler(line)
+      # When bisecting a chunk, also track each test's pass/fail status and
+      # capture the first unexpected-failure message per test; both methods
+      # return the line unchanged.
+      if self.bisectChunk:
+        self.record_result(line)
+        self.first_error(line)
     __call__ = processOutputLine
 
     def outputHandlers(self):
       """returns ordered list of output handlers"""
       return [self.fix_stack,
               self.format,
               self.record_last_test,
               self.dumpScreenOnTimeout,
@@ -1624,16 +1709,35 @@ class Mochitest(MochitestUtilsMixin):
         self.shutdownLeaks.process()
 
       if self.lsanLeaks:
         self.lsanLeaks.process()
 
 
     # output line handlers:
     # these take a line and return a line
+    def record_result(self, line):
+      """Record each test's outcome in self.harness.result, keyed by the
+      test's base filename, for use by chunk bisection. Returns the line
+      unchanged so it can sit in the output-handler chain."""
+      if "TEST-START" in line: # by default make the result key equal to pass.
+        # "TEST-START | <path>": the path is the last |-separated field.
+        key = line.split('|')[-1].split('/')[-1].strip()
+        self.harness.result[key] = "PASS"
+      elif "TEST-UNEXPECTED" in line:
+        # "<STATUS> | <path> | <message>": the path is the second-to-last field.
+        key = line.split('|')[-2].split('/')[-1].strip()
+        self.harness.result[key] = "FAIL"
+      elif "TEST-KNOWN-FAIL" in line:
+        key = line.split('|')[-2].split('/')[-1].strip()
+        self.harness.result[key] = "TODO"
+      return line
+
+    def first_error(self, line):
+      """Remember the first TEST-UNEXPECTED-FAIL message seen for each test
+      (later failures of the same test are ignored). Returns the line
+      unchanged.
+
+      NOTE(review): despite its name, self.harness.expectedError collects
+      *unexpected* failure messages — confirm naming against the harness."""
+      if "TEST-UNEXPECTED-FAIL" in line:
+        key = line.split('|')[-2].split('/')[-1].strip()
+        if key not in self.harness.expectedError:
+          self.harness.expectedError[key] = line.split('|')[-1].strip()
+      return line
+
     def countline(self, line):
       val = 0
       try:
         val = int(line.split(':')[-1].strip())
       except ValueError, e:
         return line
 
       if "Passed:" in line:
--- a/testing/mochitest/runtestsb2g.py
+++ b/testing/mochitest/runtestsb2g.py
@@ -64,19 +64,19 @@ class B2GMochitest(MochitestUtilsMixin):
         # causes manifestFile not to be set
         if not "manifestFile=tests.json" in self.urlOpts:
             self.urlOpts.append("manifestFile=%s" % options.manifestFile)
 
         if len(self.urlOpts) > 0:
             test_url += "?" + "&".join(self.urlOpts)
         self.test_script_args.append(test_url)
 
-    def buildTestPath(self, options):
+    def buildTestPath(self, options, testsToFilter=None):
+        # testsToFilter: optional subset of tests (used by --bisect-chunk /
+        # --run-by-dir); forwarded to the base-class manifest builder so only
+        # those tests end up in the generated manifest.
         if options.manifestFile != 'tests.json':
-            super(B2GMochitest, self).buildTestPath(options, disabled=False)
+            super(B2GMochitest, self).buildTestPath(options, testsToFilter, disabled=False)
         return self.buildTestURL(options)
 
     def build_profile(self, options):
         # preferences
         prefs = {}
         for path in self.preferences:
             prefs.update(Preferences.read_prefs(path))
 
--- a/testing/mochitest/runtestsremote.py
+++ b/testing/mochitest/runtestsremote.py
@@ -375,23 +375,23 @@ class MochiRemote(Mochitest):
             except devicemanager.DMError:
                 log.error("Automation Error: Unable to copy profile to device.")
                 raise
 
         options.profilePath = self.remoteProfile
         options.logFile = self.localLog
         return retVal
 
-    def buildTestPath(self, options):
+    def buildTestPath(self, options, testsToFilter=None):
+        # testsToFilter: optional subset of tests (used by --bisect-chunk);
+        # only forwarded when we actually build a manifest, i.e. not for
+        # robocop runs.
         if options.robocopIni != "":
             # Skip over manifest building if we just want to run
             # robocop tests.
             return self.buildTestURL(options)
         else:
-            return super(MochiRemote, self).buildTestPath(options)
+            return super(MochiRemote, self).buildTestPath(options, testsToFilter)
 
     def installChromeFile(self, filename, options):
         parts = options.app.split('/')
         if (parts[0] == options.app):
           return "NO_CHROME_ON_DROID"
         path = '/'.join(parts[:-1])
         manifest = path + "/chrome/" + os.path.basename(filename)
         try: