Merge mozilla-central to autoland
author Carsten "Tomcat" Book <cbook@mozilla.com>
Fri, 21 Apr 2017 16:03:03 +0200
changeset 402418 fe2fff73428147c36b75189670a36cee6ff07b39
parent 402417 2d53e3a676bb1617ac701b5470a91ec831bb921b (current diff)
parent 402381 c8198aa6e7677e90cc7f1e2df0a14a5cc2719055 (diff)
child 402419 fee151a11c16a1a6995770aa4c3400b31d2faaf0
push id 7391
push user mtabara@mozilla.com
push date Mon, 12 Jun 2017 13:08:53 +0000
treeherder mozilla-beta@2191d7f87e2e
milestone 55.0a1
Merge mozilla-central to autoland
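
For context, a minimal standalone sketch (not taken from the patch) of the pattern this change renames and simplifies: fetch a URL into memory, compare the number of bytes received against the expected file size, and raise an exception that a retry wrapper can catch. The helper name fetch_into_memory is hypothetical; the sketch assumes Python 2's urllib2, as used in script.py.

    # Illustrative sketch only -- not the mozharness implementation.
    import urllib2
    from io import BytesIO


    class FetchedIncorrectFilesize(Exception):
        pass


    def fetch_into_memory(url, timeout=30):
        # Hypothetical helper mirroring ScriptMixin.fetch_url_into_memory().
        response = urllib2.urlopen(urllib2.Request(url), timeout=timeout)
        # Assumes the server sends a Content-Length header for http/https URLs.
        expected_file_size = int(response.headers.get('Content-Length'))
        file_contents = response.read()
        obtained_file_size = len(file_contents)
        if obtained_file_size != expected_file_size:
            # A retry wrapper can catch this exception and fetch again.
            raise FetchedIncorrectFilesize(
                'Expected {} bytes but received {}'.format(
                    expected_file_size, obtained_file_size))
        return BytesIO(file_contents)

In the patch itself, ScriptMixin.fetch_url_into_memory() is wrapped in self.retry() with FetchedIncorrectFilesize listed in retry_exceptions (see the last hunk below).
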
--- a/testing/mozharness/mozharness/base/script.py
+++ b/testing/mozharness/mozharness/base/script.py
@@ -31,17 +31,16 @@ import sys
 import tarfile
 import time
 import traceback
 import urllib2
 import zipfile
 import httplib
 import urlparse
 import hashlib
-import zlib
 if os.name == 'nt':
     try:
         import win32file
         import win32api
         PYWIN32 = True
     except ImportError:
         PYWIN32 = False
 
@@ -54,17 +53,17 @@ except ImportError:
 from io import BytesIO
 
 from mozprocess import ProcessHandler
 from mozharness.base.config import BaseConfig
 from mozharness.base.log import SimpleFileLogger, MultiFileLogger, \
     LogMixin, OutputParser, DEBUG, INFO, ERROR, FATAL
 
 
-class ContentLengthMismatch(Exception):
+class FetchedIncorrectFilesize(Exception):
     pass
 
 
 def platform_name():
     pm = PlatformMixin()
 
     if pm._is_linux() and pm._is_64_bit():
         return 'linux64'
@@ -351,31 +350,31 @@ class ScriptMixin(PlatformMixin):
     def fetch_url_into_memory(self, url):
         ''' Downloads a file from a url into memory instead of disk.
 
         Args:
             url (str): URL path where the file to be downloaded is located.
 
         Raises:
             IOError: When the url points to a file on disk and cannot be found
-            ContentLengthMismatch: When the length of the retrieved content does not match the
-                                   Content-Length response header.
+            FetchedIncorrectFilesize: When the size of the fetched file does not match the
+                                      expected file size.
             ValueError: When the scheme of a url is not what is expected.
 
         Returns:
             BytesIO: contents of url
         '''
         self.info('Fetch {} into memory'.format(url))
         parsed_url = urlparse.urlparse(url)
 
         if parsed_url.scheme in ('', 'file'):
             if not os.path.isfile(url):
                 raise IOError('Could not find file to extract: {}'.format(url))
 
-            content_length = os.stat(url.replace('file://', '')).st_size
+            expected_file_size = os.stat(url.replace('file://', '')).st_size
 
             # In case we're referrencing a file without file://
             if parsed_url.scheme == '':
                 url = 'file://%s' % os.path.abspath(url)
                 parsed_url = urlparse.urlparse(url)
 
         request = urllib2.Request(url)
         # When calling fetch_url_into_memory() you should retry when we raise one of these exceptions:
@@ -388,43 +387,36 @@ class ScriptMixin(PlatformMixin):
         # * Bug 1301855 - URLError: <urlopen error [Errno 60] Operation timed out>
         # * Bug 1302237 - URLError: <urlopen error [Errno 104] Connection reset by peer>
         # * Bug 1301807 - BadStatusLine: ''
         #
         # Bug 1309912 - Adding timeout in hopes to solve blocking on response.read() (bug 1300413)
         response = urllib2.urlopen(request, timeout=30)
 
         if parsed_url.scheme in ('http', 'https'):
-            content_length = int(response.headers.get('Content-Length'))
-
-        response_body = response.read()
-        response_body_size = len(response_body)
-
-        self.info('Content-Length response header: {}'.format(content_length))
-        self.info('Bytes received: {}'.format(response_body_size))
+            expected_file_size = int(response.headers.get('Content-Length'))
 
-        if response_body_size != content_length:
-            raise ContentLengthMismatch(
-                'The retrieved Content-Length header declares a body length of {} bytes, while we actually retrieved {} bytes'.format(
-                    content_length, response_body_size)
-            )
+        file_contents = response.read()
+        obtained_file_size = len(file_contents)
+        self.info('Expected file size: {}'.format(expected_file_size))
+        self.info('Obtained file size: {}'.format(obtained_file_size))
 
-        if response.info().get('Content-Encoding') == 'gzip':
-            self.info('Content-Encoding is "gzip", so decompressing response body')
-            file_contents = zlib.decompress(response_body)
-        else:
-            file_contents = response_body
+        if obtained_file_size != expected_file_size:
+            raise FetchedIncorrectFilesize(
+                'The expected file size is {} while we got instead {}'.format(
+                    expected_file_size, obtained_file_size)
+            )
 
         # Use BytesIO instead of StringIO
         # http://stackoverflow.com/questions/34162017/unzip-buffer-with-python/34162395#34162395
         return BytesIO(file_contents)
 
 
     def _download_file(self, url, file_name):
-        """ Helper function for download_file()
+        """ Helper script for download_file()
         Additionaly this function logs all exceptions as warnings before
         re-raising them
 
         Args:
             url (str): string containing the URL with the file location
             file_name (str): name of the file where the downloaded file
                              is written.
 
@@ -446,39 +438,27 @@ class ScriptMixin(PlatformMixin):
 
         try:
             f_length = None
             f = self._urlopen(url, timeout=30)
 
             if f.info().get('content-length') is not None:
                 f_length = int(f.info()['content-length'])
                 got_length = 0
-            if f.info().get('Content-Encoding') == 'gzip':
-                # Note, we'll download the full compressed content into its own
-                # file, since that allows the gzip library to seek through it.
-                # Once downloaded, we'll decompress it into the real target
-                # file, and delete the compressed version.
-                local_file = open(file_name + '.gz', 'wb')
-            else:
-                local_file = open(file_name, 'wb')
+            local_file = open(file_name, 'wb')
             while True:
                 block = f.read(1024 ** 2)
                 if not block:
                     if f_length is not None and got_length != f_length:
                         raise urllib2.URLError("Download incomplete; content-length was %d, but only received %d" % (f_length, got_length))
                     break
                 local_file.write(block)
                 if f_length is not None:
                     got_length += len(block)
             local_file.close()
-            if f.info().get('Content-Encoding') == 'gzip':
-                # Decompress file into target location, then remove compressed version
-                with gzip.open(file_name + '.gz', 'rb') as f_in, open(file_name, 'wb') as f_out:
-                    shutil.copyfileobj(f_in, f_out)
-                os.remove(file_name + '.gz')
             return file_name
         except urllib2.HTTPError, e:
             self.warning("Server returned status %s %s for %s" % (str(e.code), str(e), url))
             raise
         except urllib2.URLError, e:
             self.warning("URL Error: %s" % url)
 
             # Failures due to missing local files won't benefit from retry.
@@ -681,17 +661,17 @@ class ScriptMixin(PlatformMixin):
         # 1) Let's fetch the file
         retry_args = dict(
             retry_exceptions=(
                 urllib2.HTTPError,
                 urllib2.URLError,
                 httplib.BadStatusLine,
                 socket.timeout,
                 socket.error,
-                ContentLengthMismatch,
+                FetchedIncorrectFilesize,
             ),
             sleeptime=30,
             attempts=5,
             error_message="Can't download from {}".format(url),
             error_level=FATAL,
         )
         compressed_file = self.retry(
             self.fetch_url_into_memory,