Bug 1275269 - Refactor & test BaseMarionetteTestRunner.run_tests; r=maja_zf
author: Anjana Vakil <anjanavakil@gmail.com>
date: Thu, 23 Jun 2016 17:48:47 +0200
changeset 302705 b60487f638fd937ec630c546df55aebc12aa6795
parent 302704 e621d5f0c6b539fe3a6e6a89a7a3f3a2f6d55b8c
child 302706 0cbfbfe2a193c56e656b38cd054a09ea5f4d3076
push id: 78836
push user: mjzffr@gmail.com
push date: Mon, 27 Jun 2016 15:00:57 +0000
treeherder: mozilla-inbound@0cbfbfe2a193
reviewers: maja_zf
bugs: 1275269
milestone: 50.0a1
Bug 1275269 - Refactor & test BaseMarionetteTestRunner.run_tests; r=maja_zf

Refactor BaseMarionetteTestRunner.run_tests into smaller sub-methods. Refactor _print_summary, moving functionality not related to summary-printing into run_tests.

In test_marionette_runner.py, test run_tests and its sub-methods:
- Add mock_runner fixture to create a runner instance with some mocked properties
- Test reset_test_stats
- Test _initialize_test_run

MozReview-Commit-ID: 7k1GJ0dyLCe
testing/marionette/harness/marionette/runner/base.py
testing/marionette/harness/marionette/tests/harness_unit/test_marionette_runner.py
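
The new mock_runner fixture in the test file relies on pytest's fixture composition: a fixture can request other fixtures as arguments and return a customized object. A minimal, self-contained sketch of that pattern (illustrative only, not part of the patch; the names fake_marionette and fake_runner are invented):

import pytest
from mock import Mock

@pytest.fixture()
def fake_marionette():
    # hypothetical stand-in for the mock_marionette fixture in the patch
    return Mock(name='marionette')

@pytest.fixture()
def fake_runner(fake_marionette):
    # a fixture may request another fixture as an argument; mock_runner
    # below builds on runner and mock_marionette in the same way
    runner = Mock(name='runner')
    runner.marionette = fake_marionette
    return runner

def test_fake_runner_has_marionette(fake_runner):
    assert fake_runner.marionette is not None
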
--- a/testing/marionette/harness/marionette/runner/base.py
+++ b/testing/marionette/harness/marionette/runner/base.py
@@ -691,19 +691,16 @@ class BaseMarionetteTestRunner(object):
                     connection.connect((host,int(port)))
                     connection.close()
                 except Exception, e:
                     raise Exception("Connection attempt to %s:%s failed with error: %s" %(host,port,e))
         if self.workspace:
             kwargs['workspace'] = self.workspace_path
         return kwargs
 
-    def start_marionette(self):
-        self.marionette = self.driverclass(**self._build_kwargs())
-
     def launch_test_container(self):
         if self.marionette.session is None:
             self.marionette.start_session()
         self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
 
         result = self.marionette.execute_async_script("""
 if((navigator.mozSettings == undefined) || (navigator.mozSettings == null) || (navigator.mozApps == undefined) || (navigator.mozApps == null)) {
     marionetteScriptFinished(false);
@@ -750,90 +747,116 @@ setReq.onerror = function() {
         crash = True
         try:
             crash = self.marionette.check_for_crash()
             self.crashed += int(crash)
         except Exception:
             traceback.print_exc()
         return crash
 
-    def run_tests(self, tests):
+    def _initialize_test_run(self, tests):
         assert len(tests) > 0
         assert len(self.test_handlers) > 0
         self.reset_test_stats()
-        self.start_time = time.time()
 
+    def _start_marionette(self):
         need_external_ip = True
         if not self.marionette:
-            self.start_marionette()
+            self.marionette = self.driverclass(**self._build_kwargs())
             # if we're working against a desktop version, we usually don't need
             # an external ip
             if self.capabilities['device'] == "desktop":
                 need_external_ip = False
         self.logger.info('Initial Profile Destination is '
                          '"{}"'.format(self.marionette.profile_path))
+        return need_external_ip
 
+    def _set_baseurl(self, need_external_ip):
         # Gaia sets server_root and that means we shouldn't spin up our own httpd
         if not self.httpd:
             if self.server_root is None or os.path.isdir(self.server_root):
                 self.logger.info("starting httpd")
                 self.start_httpd(need_external_ip)
                 self.marionette.baseurl = self.httpd.get_url()
                 self.logger.info("running httpd on %s" % self.marionette.baseurl)
             else:
                 self.marionette.baseurl = self.server_root
                 self.logger.info("using remote content from %s" % self.marionette.baseurl)
 
-        device_info = None
 
+    def _add_tests(self, tests):
         for test in tests:
             self.add_test(test)
 
-        # ensure we have only tests files with names starting with 'test_'
         invalid_tests = \
             [t['filepath'] for t in self.tests
              if not os.path.basename(t['filepath']).startswith('test_')]
         if invalid_tests:
-            raise Exception("Tests file names must starts with 'test_'."
+            raise Exception("Tests file names must start with 'test_'."
                             " Invalid test names:\n  %s"
                             % '\n  '.join(invalid_tests))
 
-        self.logger.info("running with e10s: {}".format(self.e10s))
-        version_info = mozversion.get_version(binary=self.bin,
-                                              sources=self.sources,
-                                              dm_type=os.environ.get('DM_TRANS', 'adb') )
-
-        self.logger.suite_start(self.tests,
-                                version_info=version_info,
-                                device_info=device_info)
-
+    def _log_skipped_tests(self):
         for test in self.manifest_skipped_tests:
             name = os.path.basename(test['path'])
             self.logger.test_start(name)
             self.logger.test_end(name,
                                  'SKIP',
                                  message=test['disabled'])
             self.todo += 1
 
+    def run_tests(self, tests):
+        start_time = time.time()
+        self._initialize_test_run(tests)
+
+        need_external_ip = self._start_marionette()
+        self._set_baseurl(need_external_ip)
+
+        self._add_tests(tests)
+
+        self.logger.info("running with e10s: {}".format(self.e10s))
+        version_info = mozversion.get_version(binary=self.bin,
+                                              sources=self.sources,
+                                              dm_type=os.environ.get('DM_TRANS', 'adb') )
+
+        self.logger.suite_start(self.tests, version_info=version_info)
+
+        self._log_skipped_tests()
+
         interrupted = None
         try:
             counter = self.repeat
             while counter >=0:
-                round = self.repeat - counter
-                if round > 0:
-                    self.logger.info('\nREPEAT %d\n-------' % round)
+                round_num = self.repeat - counter
+                if round_num > 0:
+                    self.logger.info('\nREPEAT %d\n-------' % round_num)
                 self.run_test_sets()
                 counter -= 1
         except KeyboardInterrupt:
             # in case of KeyboardInterrupt during the test execution
             # we want to display current test results.
             # so we keep the exception to raise it later.
             interrupted = sys.exc_info()
         try:
             self._print_summary(tests)
+            self.record_crash()
+            self.elapsedtime = time.time() - start_time
+
+            if self.marionette.instance:
+                self.marionette.instance.close()
+                self.marionette.instance = None
+            self.marionette.cleanup()
+
+            for run_tests in self.mixin_run_tests:
+                run_tests(tests)
+            if self.shuffle:
+                self.logger.info("Using seed where seed is:%d" % self.shuffle_seed)
+
+            self.logger.info('mode: {}'.format('e10s' if self.e10s else 'non-e10s'))
+            self.logger.suite_end()
         except:
             # raise only the exception if we were not interrupted
             if not interrupted:
                 raise
         finally:
             # reraise previous interruption now
             if interrupted:
                 raise interrupted[0], interrupted[1], interrupted[2]
@@ -850,34 +873,16 @@ setReq.onerror = function() {
         else:
             self.logger.info('todo: %d (skipped: %d)' % (self.todo, self.skipped))
 
         if self.failed > 0:
             self.logger.info('\nFAILED TESTS\n-------')
             for failed_test in self.failures:
                 self.logger.info('%s' % failed_test[0])
 
-        self.record_crash()
-        self.end_time = time.time()
-        self.elapsedtime = self.end_time - self.start_time
-
-        if self.marionette.instance:
-            self.marionette.instance.close()
-            self.marionette.instance = None
-
-        self.marionette.cleanup()
-
-        for run_tests in self.mixin_run_tests:
-            run_tests(tests)
-        if self.shuffle:
-            self.logger.info("Using seed where seed is:%d" % self.shuffle_seed)
-
-        self.logger.info('mode: {}'.format('e10s' if self.e10s else 'non-e10s'))
-        self.logger.suite_end()
-
     def start_httpd(self, need_external_ip):
         warnings.warn("start_httpd has been deprecated in favour of create_httpd",
             DeprecationWarning)
         self.httpd = self.create_httpd(need_external_ip)
 
     def create_httpd(self, need_external_ip):
         host = "127.0.0.1"
         if need_external_ip:
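
For reference, the KeyboardInterrupt handling kept in run_tests above boils down to this pattern: stash the interrupt with sys.exc_info() so the summary and cleanup still run, then re-raise it with its original traceback via the Python 2 three-argument raise. A stripped-down sketch (illustrative only; run_with_deferred_interrupt is an invented name):

import sys

def run_with_deferred_interrupt(work, summarize):
    interrupted = None
    try:
        work()
    except KeyboardInterrupt:
        # keep the interrupt so current results can still be reported
        interrupted = sys.exc_info()
    try:
        summarize()
    except:
        # only surface summarize() failures if we were not interrupted
        if not interrupted:
            raise
    finally:
        if interrupted:
            # re-raise the original KeyboardInterrupt with its traceback
            # (Python 2 three-argument raise, as in run_tests above)
            raise interrupted[0], interrupted[1], interrupted[2]
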
--- a/testing/marionette/harness/marionette/tests/harness_unit/test_marionette_runner.py
+++ b/testing/marionette/harness/marionette/tests/harness_unit/test_marionette_runner.py
@@ -1,20 +1,21 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 import pytest
-from mock import patch, Mock, DEFAULT, mock_open
+from mock import patch, Mock, DEFAULT, mock_open, MagicMock
 
 from marionette.runtests import (
     MarionetteTestRunner,
     MarionetteHarness,
     cli
 )
 from marionette.runner import MarionetteTestResult
+from marionette_driver.marionette import Marionette
 
 # avoid importing MarionetteJSTestCase to prevent pytest from
 # collecting and running it as part of this test suite
 import marionette.marionette_test as marionette_test
 
 
 def _check_crash_counts(has_crashed, runner, mock_marionette):
     if has_crashed:
@@ -22,18 +23,17 @@ def _check_crash_counts(has_crashed, run
         assert runner.crashed == 1
     else:
         assert runner.crashed == 0
 
 
 @pytest.fixture()
 def mock_marionette(request):
     """ Mock marionette instance """
-    import marionette_driver
-    marionette = Mock(spec=marionette_driver.marionette.Marionette)
+    marionette = MagicMock(spec=Marionette)
     if 'has_crashed' in request.funcargnames:
         marionette.check_for_crash.return_value = request.getfuncargvalue(
             'has_crashed'
         )
     return marionette
 
 
 @pytest.fixture()
@@ -124,16 +124,28 @@ def mach_parsed_kwargs(logger):
 @pytest.fixture()
 def runner(mach_parsed_kwargs):
     """
     MarionetteTestRunner instance initialized with default options.
     """
     return MarionetteTestRunner(**mach_parsed_kwargs)
 
 
+@pytest.fixture()
+def mock_runner(runner, mock_marionette):
+    """
+    MarionetteTestRunner instance with mocked-out
+    self.marionette and other properties.
+    """
+    runner.driverclass = mock_marionette
+    runner._set_baseurl = Mock()
+    runner.run_test_set = Mock()
+    return runner
+
+
 @pytest.fixture
 def harness_class(request):
     """
     Mock based on MarionetteHarness whose run method just returns a number of
     failures according to the supplied test parameter
     """
     if 'num_fails_crashed' in request.funcargnames:
         num_fails_crashed = request.getfuncargvalue('num_fails_crashed')
@@ -353,11 +365,41 @@ def test_add_test_manifest(runner):
     for test in runner.tests:
         assert test['filepath'].endswith(('test_expected_pass.py', 'test_expected_fail.py'))
         if test['filepath'].endswith('test_expected_fail.py'):
             assert test['expected'] == 'fail'
         else:
             assert test['expected'] == 'pass'
 
 
+def test_reset_test_stats(runner):
+    def reset_successful(runner):
+        stats = ['passed', 'failed', 'unexpected_successes', 'todo', 'skipped', 'failures']
+        return all([((s in vars(runner)) and (not vars(runner)[s])) for s in stats])
+    assert reset_successful(runner)
+    runner.passed = 1
+    runner.failed = 1
+    runner.failures.append(['TEST-UNEXPECTED-FAIL'])
+    assert not reset_successful(runner)
+    with pytest.raises(Exception):
+        runner.run_tests([u'test_fake_thing.py'])
+    assert reset_successful(runner)
+
+
+def test_initialize_test_run(mock_runner):
+    tests = [u'test_fake_thing.py']
+    mock_runner.reset_test_stats = Mock()
+    with patch('marionette.runner.base.mozversion.get_version'):
+        mock_runner.run_tests(tests)
+    assert mock_runner.reset_test_stats.called
+    with pytest.raises(AssertionError) as test_exc:
+        mock_runner.run_tests([])
+    assert "len(tests)" in str(test_exc.traceback[-1].statement)
+    with pytest.raises(AssertionError) as hndl_exc:
+        mock_runner.test_handlers = []
+        mock_runner.run_tests(tests)
+    assert "test_handlers" in str(hndl_exc.traceback[-1].statement)
+    assert mock_runner.reset_test_stats.call_count == 1
+
+
 if __name__ == '__main__':
     import sys
     sys.exit(pytest.main(['--verbose', __file__]))
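
test_initialize_test_run above inspects the source of the failing assert through pytest's ExceptionInfo.traceback. A self-contained sketch of that API (illustrative only; _needs_items is an invented helper):

import pytest

def _needs_items(items):
    assert len(items) > 0

def test_traceback_statement_inspection():
    with pytest.raises(AssertionError) as excinfo:
        _needs_items([])
    # traceback[-1].statement is the source of the statement that failed
    assert "len(items)" in str(excinfo.traceback[-1].statement)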