Bug 1304390 - Compress/decompress script sources in chunks. r=luke
authorJan de Mooij <jdemooij@mozilla.com>
Fri, 23 Sep 2016 12:55:14 +0200
changeset 315145 52459cfd15765aa82ff26501b9c1a56272d49f0d
parent 315144 c4d91d17c8ed2e385f4a4a3d97f861eee722626f
child 315146 01aa7f5bde71d0a0c87889b6642ebbe8c6dbf498
push id32563
push userihsiao@mozilla.com
push dateMon, 26 Sep 2016 11:18:33 +0000
treeherderautoland@eb840c87b5fd [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersluke
bugs1304390
milestone52.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1304390 - Compress/decompress script sources in chunks. r=luke
js/src/jsfun.cpp
js/src/jsscript.cpp
js/src/jsscript.h
js/src/vm/Compression.cpp
js/src/vm/Compression.h
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -1489,25 +1489,23 @@ JSFunction::createScriptForLazilyInterpr
             if (!lazy->maybeScript())
                 lazy->initScript(clonedScript);
             return true;
         }
 
         MOZ_ASSERT(lazy->scriptSource()->hasSourceData());
 
         // Parse and compile the script from source.
+        size_t lazyLength = lazy->end() - lazy->begin();
         UncompressedSourceCache::AutoHoldEntry holder;
-        const char16_t* chars = lazy->scriptSource()->chars(cx, holder);
+        const char16_t* chars = lazy->scriptSource()->chars(cx, holder, lazy->begin(), lazyLength);
         if (!chars)
             return false;
 
-        const char16_t* lazyStart = chars + lazy->begin();
-        size_t lazyLength = lazy->end() - lazy->begin();
-
-        if (!frontend::CompileLazyFunction(cx, lazy, lazyStart, lazyLength)) {
+        if (!frontend::CompileLazyFunction(cx, lazy, chars, lazyLength)) {
             // The frontend may have linked the function and the non-lazy
             // script together during bytecode compilation. Reset it now on
             // error.
             fun->initLazyScript(lazy);
             if (lazy->hasScript())
                 lazy->resetScript();
             return false;
         }
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -1405,105 +1405,114 @@ JSScript::loadSource(JSContext* cx, Scri
 JSFlatString*
 JSScript::sourceData(JSContext* cx)
 {
     MOZ_ASSERT(scriptSource()->hasSourceData());
     return scriptSource()->substring(cx, sourceStart(), sourceEnd());
 }
 
 UncompressedSourceCache::AutoHoldEntry::AutoHoldEntry()
-  : cache_(nullptr), source_(nullptr)
+  : cache_(nullptr), sourceChunk_()
 {
 }
 
 void
-UncompressedSourceCache::AutoHoldEntry::holdEntry(UncompressedSourceCache* cache, ScriptSource* source)
+UncompressedSourceCache::AutoHoldEntry::holdEntry(UncompressedSourceCache* cache,
+                                                  const ScriptSourceChunk& sourceChunk)
 {
     // Initialise the holder for a specific cache and script source. This will
     // hold on to the cached source chars in the event that the cache is purged.
-    MOZ_ASSERT(!cache_ && !source_ && !charsToFree_);
+    MOZ_ASSERT(!cache_ && !sourceChunk_.valid() && !charsToFree_);
     cache_ = cache;
-    source_ = source;
+    sourceChunk_ = sourceChunk;
+}
+
+void
+UncompressedSourceCache::AutoHoldEntry::holdChars(UniqueTwoByteChars chars)
+{
+    MOZ_ASSERT(!cache_ && !sourceChunk_.valid() && !charsToFree_);
+    charsToFree_ = Move(chars);
 }
 
 void
 UncompressedSourceCache::AutoHoldEntry::deferDelete(UniqueTwoByteChars chars)
 {
     // Take ownership of source chars now the cache is being purged. Remove our
     // reference to the ScriptSource which might soon be destroyed.
-    MOZ_ASSERT(cache_ && source_ && !charsToFree_);
+    MOZ_ASSERT(cache_ && sourceChunk_.valid() && !charsToFree_);
     cache_ = nullptr;
-    source_ = nullptr;
+    sourceChunk_ = ScriptSourceChunk();
     charsToFree_ = Move(chars);
 }
 
 UncompressedSourceCache::AutoHoldEntry::~AutoHoldEntry()
 {
     if (cache_) {
-        MOZ_ASSERT(source_);
+        MOZ_ASSERT(sourceChunk_.valid());
         cache_->releaseEntry(*this);
     }
 }
 
 void
-UncompressedSourceCache::holdEntry(AutoHoldEntry& holder, ScriptSource* ss)
+UncompressedSourceCache::holdEntry(AutoHoldEntry& holder, const ScriptSourceChunk& ssc)
 {
     MOZ_ASSERT(!holder_);
-    holder.holdEntry(this, ss);
+    holder.holdEntry(this, ssc);
     holder_ = &holder;
 }
 
 void
 UncompressedSourceCache::releaseEntry(AutoHoldEntry& holder)
 {
     MOZ_ASSERT(holder_ == &holder);
     holder_ = nullptr;
 }
 
 const char16_t*
-UncompressedSourceCache::lookup(ScriptSource* ss, AutoHoldEntry& holder)
+UncompressedSourceCache::lookup(const ScriptSourceChunk& ssc, AutoHoldEntry& holder)
 {
     MOZ_ASSERT(!holder_);
     if (!map_)
         return nullptr;
-    if (Map::Ptr p = map_->lookup(ss)) {
-        holdEntry(holder, ss);
+    if (Map::Ptr p = map_->lookup(ssc)) {
+        holdEntry(holder, ssc);
         return p->value().get();
     }
     return nullptr;
 }
 
 bool
-UncompressedSourceCache::put(ScriptSource* ss, UniqueTwoByteChars str, AutoHoldEntry& holder)
+UncompressedSourceCache::put(const ScriptSourceChunk& ssc, UniqueTwoByteChars str,
+                             AutoHoldEntry& holder)
 {
     MOZ_ASSERT(!holder_);
 
     if (!map_) {
         UniquePtr<Map> map = MakeUnique<Map>();
         if (!map || !map->init())
             return false;
 
         map_ = Move(map);
     }
 
-    if (!map_->put(ss, Move(str)))
+    if (!map_->put(ssc, Move(str)))
         return false;
 
-    holdEntry(holder, ss);
+    holdEntry(holder, ssc);
     return true;
 }
 
 void
 UncompressedSourceCache::purge()
 {
     if (!map_)
         return;
 
     for (Map::Range r = map_->all(); !r.empty(); r.popFront()) {
-        if (holder_ && r.front().key() == holder_->source()) {
+        if (holder_ && r.front().key() == holder_->sourceChunk()) {
             holder_->deferDelete(Move(r.front().value()));
             holder_ = nullptr;
         }
     }
 
     map_.reset();
 }
 
@@ -1515,111 +1524,158 @@ UncompressedSourceCache::sizeOfExcluding
         n += map_->sizeOfIncludingThis(mallocSizeOf);
         for (Map::Range r = map_->all(); !r.empty(); r.popFront())
             n += mallocSizeOf(r.front().value().get());
     }
     return n;
 }
 
 const char16_t*
-ScriptSource::chars(JSContext* cx, UncompressedSourceCache::AutoHoldEntry& holder)
+ScriptSource::chunkChars(JSContext* cx, UncompressedSourceCache::AutoHoldEntry& holder,
+                         size_t chunk)
 {
-    struct CharsMatcher
+    const Compressed& c = data.as<Compressed>();
+
+    ScriptSourceChunk ssc(this, chunk);
+    if (const char16_t* decompressed = cx->caches.uncompressedSourceCache.lookup(ssc, holder))
+        return decompressed;
+
+    size_t totalLengthInBytes = length() * sizeof(char16_t);
+    size_t chunkBytes = Compressor::chunkSize(totalLengthInBytes, chunk);
+
+    MOZ_ASSERT((chunkBytes % sizeof(char16_t)) == 0);
+    const size_t lengthWithNull = (chunkBytes / sizeof(char16_t)) + 1;
+    UniqueTwoByteChars decompressed(js_pod_malloc<char16_t>(lengthWithNull));
+    if (!decompressed) {
+        JS_ReportOutOfMemory(cx);
+        return nullptr;
+    }
+
+    if (!DecompressStringChunk((const unsigned char*) c.raw.chars(),
+                               chunk,
+                               reinterpret_cast<unsigned char*>(decompressed.get()),
+                               chunkBytes))
     {
-        JSContext* cx;
-        ScriptSource& ss;
-        UncompressedSourceCache::AutoHoldEntry& holder;
-
-        explicit CharsMatcher(JSContext* cx, ScriptSource& ss,
-                              UncompressedSourceCache::AutoHoldEntry& holder)
-          : cx(cx)
-          , ss(ss)
-          , holder(holder)
-        { }
-
-        const char16_t* match(Uncompressed& u) {
-            return u.string.chars();
+        JS_ReportOutOfMemory(cx);
+        return nullptr;
+    }
+
+    decompressed[lengthWithNull - 1] = '\0';
+
+    const char16_t* ret = decompressed.get();
+    if (!cx->caches.uncompressedSourceCache.put(ssc, Move(decompressed), holder)) {
+        JS_ReportOutOfMemory(cx);
+        return nullptr;
+    }
+    return ret;
+}
+
+const char16_t*
+ScriptSource::chars(JSContext* cx, UncompressedSourceCache::AutoHoldEntry& holder,
+                    size_t begin, size_t len)
+{
+    MOZ_ASSERT(begin + len <= length());
+
+    if (data.is<Uncompressed>()) {
+        const char16_t* chars = data.as<Uncompressed>().string.chars();
+        if (!chars)
+            return nullptr;
+        return chars + begin;
+    }
+
+    if (data.is<Missing>())
+        MOZ_CRASH("ScriptSource::chars() on ScriptSource with SourceType = Missing");
+
+    MOZ_ASSERT(data.is<Compressed>());
+
+    // Determine which chunk(s) we are interested in, and the offsets within
+    // these chunks.
+    size_t firstChunk, lastChunk;
+    size_t firstChunkOffset, lastChunkOffset;
+    MOZ_ASSERT(len > 0);
+    Compressor::toChunkOffset(begin * sizeof(char16_t), &firstChunk, &firstChunkOffset);
+    Compressor::toChunkOffset((begin + len - 1) * sizeof(char16_t), &lastChunk, &lastChunkOffset);
+
+    MOZ_ASSERT(firstChunkOffset % sizeof(char16_t) == 0);
+    size_t firstChar = firstChunkOffset / sizeof(char16_t);
+
+    if (firstChunk == lastChunk) {
+        const char16_t* chars = chunkChars(cx, holder, firstChunk);
+        if (!chars)
+            return nullptr;
+        return chars + firstChar;
+    }
+
+    // We need multiple chunks. Allocate a (null-terminated) buffer to hold
+    // |len| chars and copy uncompressed chars from the chunks into it. We use
+    // chunkChars() so we benefit from chunk caching by UncompressedSourceCache.
+
+    MOZ_ASSERT(firstChunk < lastChunk);
+
+    size_t lengthWithNull = len + 1;
+    UniqueTwoByteChars decompressed(js_pod_malloc<char16_t>(lengthWithNull));
+    if (!decompressed) {
+        JS_ReportOutOfMemory(cx);
+        return nullptr;
+    }
+
+    size_t totalLengthInBytes = length() * sizeof(char16_t);
+    char16_t* cursor = decompressed.get();
+
+    for (size_t i = firstChunk; i <= lastChunk; i++) {
+        UncompressedSourceCache::AutoHoldEntry chunkHolder;
+        const char16_t* chars = chunkChars(cx, chunkHolder, i);
+        if (!chars)
+            return nullptr;
+
+        size_t numChars = Compressor::chunkSize(totalLengthInBytes, i) / sizeof(char16_t);
+        if (i == firstChunk) {
+            MOZ_ASSERT(firstChar < numChars);
+            chars += firstChar;
+            numChars -= firstChar;
+        } else if (i == lastChunk) {
+            size_t numCharsNew = lastChunkOffset / sizeof(char16_t) + 1;
+            MOZ_ASSERT(numCharsNew <= numChars);
+            numChars = numCharsNew;
         }
-
-        const char16_t* match(Compressed& c) {
-            if (const char16_t* decompressed = cx->caches.uncompressedSourceCache.lookup(&ss, holder))
-                return decompressed;
-
-            const size_t lengthWithNull = ss.length() + 1;
-            UniqueTwoByteChars decompressed(js_pod_malloc<char16_t>(lengthWithNull));
-            if (!decompressed) {
-                JS_ReportOutOfMemory(cx);
-                return nullptr;
-            }
-
-            if (!DecompressString((const unsigned char*) c.raw.chars(),
-                                  c.raw.length(),
-                                  reinterpret_cast<unsigned char*>(decompressed.get()),
-                                  lengthWithNull * sizeof(char16_t)))
-            {
-                JS_ReportOutOfMemory(cx);
-                return nullptr;
-            }
-
-            decompressed[ss.length()] = 0;
-
-            // Decompressing a huge script is expensive. With lazy parsing and
-            // relazification, this can happen repeatedly, so conservatively go
-            // back to storing the data uncompressed to avoid wasting too much
-            // time yo-yoing back and forth between compressed and uncompressed.
-            const size_t HUGE_SCRIPT = 5 * 1024 * 1024;
-            if (lengthWithNull > HUGE_SCRIPT) {
-                auto& strings = cx->runtime()->sharedImmutableStrings();
-                auto str = strings.getOrCreate(mozilla::Move(decompressed), ss.length());
-                if (!str) {
-                    JS_ReportOutOfMemory(cx);
-                    return nullptr;
-                }
-                ss.data = SourceType(Uncompressed(mozilla::Move(*str)));
-                return ss.data.as<Uncompressed>().string.chars();
-            }
-
-            const char16_t* ret = decompressed.get();
-            if (!cx->caches.uncompressedSourceCache.put(&ss, Move(decompressed), holder)) {
-                JS_ReportOutOfMemory(cx);
-                return nullptr;
-            }
-            return ret;
-        }
-
-        const char16_t* match(Missing&) {
-            MOZ_CRASH("ScriptSource::chars() on ScriptSource with SourceType = Missing");
-            return nullptr;
-        }
-    };
-
-    CharsMatcher cm(cx, *this, holder);
-    return data.match(cm);
+        mozilla::PodCopy(cursor, chars, numChars);
+        cursor += numChars;
+    }
+
+    *cursor++ = '\0';
+    MOZ_ASSERT(size_t(cursor - decompressed.get()) == lengthWithNull);
+
+    // Transfer ownership to |holder|.
+    const char16_t* ret = decompressed.get();
+    holder.holdChars(Move(decompressed));
+    return ret;
 }
 
 JSFlatString*
-ScriptSource::substring(JSContext* cx, uint32_t start, uint32_t stop)
+ScriptSource::substring(JSContext* cx, size_t start, size_t stop)
 {
     MOZ_ASSERT(start <= stop);
+    size_t len = stop - start;
     UncompressedSourceCache::AutoHoldEntry holder;
-    const char16_t* chars = this->chars(cx, holder);
+    const char16_t* chars = this->chars(cx, holder, start, len);
     if (!chars)
         return nullptr;
-    return NewStringCopyN<CanGC>(cx, chars + start, stop - start);
+    return NewStringCopyN<CanGC>(cx, chars, len);
 }
 
 JSFlatString*
-ScriptSource::substringDontDeflate(JSContext* cx, uint32_t start, uint32_t stop)
+ScriptSource::substringDontDeflate(JSContext* cx, size_t start, size_t stop)
 {
     MOZ_ASSERT(start <= stop);
+    size_t len = stop - start;
     UncompressedSourceCache::AutoHoldEntry holder;
-    const char16_t* chars = this->chars(cx, holder);
+    const char16_t* chars = this->chars(cx, holder, start, len);
     if (!chars)
         return nullptr;
-    return NewStringCopyNDontDeflate<CanGC>(cx, chars + start, stop - start);
+    return NewStringCopyNDontDeflate<CanGC>(cx, chars, len);
 }
 
 MOZ_MUST_USE bool
 ScriptSource::setSource(ExclusiveContext* cx,
                         mozilla::UniquePtr<char16_t[], JS::FreePolicy>&& source,
                         size_t length)
 {
     auto& cache = cx->zone()->runtimeFromAnyThread()->sharedImmutableStrings();
@@ -1746,51 +1802,57 @@ SourceCompressionTask::work()
     const char16_t* chars = ss->data.as<ScriptSource::Uncompressed>().string.chars();
     Compressor comp(reinterpret_cast<const unsigned char*>(chars),
                     inputBytes);
     if (!comp.init())
         return OOM;
 
     comp.setOutput(reinterpret_cast<unsigned char*>(compressed.get()), firstSize);
     bool cont = true;
+    bool reallocated = false;
     while (cont) {
         if (abort_)
             return Aborted;
 
         switch (comp.compressMore()) {
           case Compressor::CONTINUE:
             break;
           case Compressor::MOREOUTPUT: {
-            if (comp.outWritten() == inputBytes) {
+            if (reallocated) {
                 // The compressed string is longer than the original string.
                 return Aborted;
             }
 
             // The compressed output is greater than half the size of the
             // original string. Reallocate to the full size.
             if (!reallocUniquePtr(compressed, inputBytes))
                 return OOM;
 
             comp.setOutput(reinterpret_cast<unsigned char*>(compressed.get()), inputBytes);
+            reallocated = true;
             break;
           }
           case Compressor::DONE:
             cont = false;
             break;
           case Compressor::OOM:
             return OOM;
         }
     }
-    size_t compressedBytes = comp.outWritten();
+
+    size_t totalBytes = comp.totalBytesNeeded();
 
     // Shrink the buffer to the size of the compressed data.
-    mozilla::Unused << reallocUniquePtr(compressed, compressedBytes);
+    if (!reallocUniquePtr(compressed, totalBytes))
+        return OOM;
+
+    comp.finish(compressed.get(), totalBytes);
 
     auto& strings = cx->sharedImmutableStrings();
-    resultString = strings.getOrCreate(mozilla::Move(compressed), compressedBytes);
+    resultString = strings.getOrCreate(mozilla::Move(compressed), totalBytes);
     if (!resultString)
         return OOM;
 
     return Success;
 }
 
 void
 ScriptSource::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
@@ -4131,27 +4193,28 @@ LazyScriptHashPolicy::match(JSScript* sc
         script->sourceStart() != lazy->begin() ||
         script->sourceEnd() != lazy->end())
     {
         return false;
     }
 
     UncompressedSourceCache::AutoHoldEntry holder;
 
-    const char16_t* scriptChars = script->scriptSource()->chars(cx, holder);
+    size_t scriptBegin = script->sourceStart();
+    size_t length = script->sourceEnd() - scriptBegin;
+    const char16_t* scriptChars = script->scriptSource()->chars(cx, holder, scriptBegin, length);
     if (!scriptChars)
         return false;
 
-    const char16_t* lazyChars = lazy->scriptSource()->chars(cx, holder);
+    MOZ_ASSERT(scriptBegin == lazy->begin());
+    const char16_t* lazyChars = lazy->scriptSource()->chars(cx, holder, scriptBegin, length);
     if (!lazyChars)
         return false;
 
-    size_t begin = script->sourceStart();
-    size_t length = script->sourceEnd() - begin;
-    return !memcmp(scriptChars + begin, lazyChars + begin, length);
+    return !memcmp(scriptChars, lazyChars, length);
 }
 
 void
 JSScript::AutoDelazify::holdScript(JS::HandleFunction fun)
 {
     if (fun) {
         if (fun->compartment()->isSelfHosting) {
             // The self-hosting compartment is shared across runtimes, so we
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -269,56 +269,89 @@ class DebugScript
 
 typedef HashMap<JSScript*,
                 DebugScript*,
                 DefaultHasher<JSScript*>,
                 SystemAllocPolicy> DebugScriptMap;
 
 class ScriptSource;
 
+struct ScriptSourceChunk
+{
+    ScriptSource* ss;
+    uint32_t chunk;
+
+    ScriptSourceChunk()
+      : ss(nullptr), chunk(0)
+    {}
+    ScriptSourceChunk(ScriptSource* ss, uint32_t chunk)
+      : ss(ss), chunk(chunk)
+    {
+        MOZ_ASSERT(valid());
+    }
+    bool valid() const { return ss != nullptr; }
+
+    bool operator==(const ScriptSourceChunk& other) const {
+        return ss == other.ss && chunk == other.chunk;
+    }
+};
+
+struct ScriptSourceChunkHasher
+{
+    using Lookup = ScriptSourceChunk;
+
+    static HashNumber hash(const ScriptSourceChunk& ssc) {
+        return mozilla::AddToHash(DefaultHasher<ScriptSource*>::hash(ssc.ss), ssc.chunk);
+    }
+    static bool match(const ScriptSourceChunk& c1, const ScriptSourceChunk& c2) {
+        return c1 == c2;
+    }
+};
+
 class UncompressedSourceCache
 {
-    typedef HashMap<ScriptSource*,
+    typedef HashMap<ScriptSourceChunk,
                     UniqueTwoByteChars,
-                    DefaultHasher<ScriptSource*>,
+                    ScriptSourceChunkHasher,
                     SystemAllocPolicy> Map;
 
   public:
     // Hold an entry in the source data cache and prevent it from being purged on GC.
     class AutoHoldEntry
     {
         UncompressedSourceCache* cache_;
-        ScriptSource* source_;
+        ScriptSourceChunk sourceChunk_;
         UniqueTwoByteChars charsToFree_;
       public:
         explicit AutoHoldEntry();
         ~AutoHoldEntry();
+        void holdChars(UniqueTwoByteChars chars);
       private:
-        void holdEntry(UncompressedSourceCache* cache, ScriptSource* source);
+        void holdEntry(UncompressedSourceCache* cache, const ScriptSourceChunk& sourceChunk);
         void deferDelete(UniqueTwoByteChars chars);
-        ScriptSource* source() const { return source_; }
+        const ScriptSourceChunk& sourceChunk() const { return sourceChunk_; }
         friend class UncompressedSourceCache;
     };
 
   private:
     UniquePtr<Map> map_;
     AutoHoldEntry* holder_;
 
   public:
     UncompressedSourceCache() : holder_(nullptr) {}
 
-    const char16_t* lookup(ScriptSource* ss, AutoHoldEntry& asp);
-    bool put(ScriptSource* ss, UniqueTwoByteChars chars, AutoHoldEntry& asp);
+    const char16_t* lookup(const ScriptSourceChunk& ssc, AutoHoldEntry& asp);
+    bool put(const ScriptSourceChunk& ssc, UniqueTwoByteChars chars, AutoHoldEntry& asp);
 
     void purge();
 
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
 
   private:
-    void holdEntry(AutoHoldEntry& holder, ScriptSource* ss);
+    void holdEntry(AutoHoldEntry& holder, const ScriptSourceChunk& ssc);
     void releaseEntry(AutoHoldEntry& holder);
 };
 
 class ScriptSource
 {
     friend struct SourceCompressionTask;
 
     uint32_t refs;
@@ -393,16 +426,19 @@ class ScriptSource
 
     // True if we can call JSRuntime::sourceHook to load the source on
     // demand. If sourceRetrievable_ and hasSourceData() are false, it is not
     // possible to get source at all.
     bool sourceRetrievable_:1;
     bool argumentsNotIncluded_:1;
     bool hasIntroductionOffset_:1;
 
+    const char16_t* chunkChars(JSContext* cx, UncompressedSourceCache::AutoHoldEntry& holder,
+                               size_t chunk);
+
   public:
     explicit ScriptSource()
       : refs(0),
         data(SourceType(Missing())),
         filename_(nullptr),
         displayURL_(nullptr),
         sourceMapURL_(nullptr),
         mutedErrors_(false),
@@ -455,19 +491,24 @@ class ScriptSource
         MOZ_ASSERT(hasSourceData());
         return data.match(LengthMatcher());
     }
 
     bool argumentsNotIncluded() const {
         MOZ_ASSERT(hasSourceData());
         return argumentsNotIncluded_;
     }
-    const char16_t* chars(JSContext* cx, UncompressedSourceCache::AutoHoldEntry& asp);
-    JSFlatString* substring(JSContext* cx, uint32_t start, uint32_t stop);
-    JSFlatString* substringDontDeflate(JSContext* cx, uint32_t start, uint32_t stop);
+
+    // Return a pointer to the chars starting at |begin| and ending at
+    // |begin + len|.
+    const char16_t* chars(JSContext* cx, UncompressedSourceCache::AutoHoldEntry& asp,
+                          size_t begin, size_t len);
+
+    JSFlatString* substring(JSContext* cx, size_t start, size_t stop);
+    JSFlatString* substringDontDeflate(JSContext* cx, size_t start, size_t stop);
     void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                 JS::ScriptSourceInfo* info) const;
 
     MOZ_MUST_USE bool setSource(ExclusiveContext* cx,
                                 mozilla::UniquePtr<char16_t[], JS::FreePolicy>&& source,
                                 size_t length);
     void setSource(SharedImmutableTwoByteString&& string);
 
--- a/js/src/vm/Compression.cpp
+++ b/js/src/vm/Compression.cpp
@@ -2,16 +2,20 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "vm/Compression.h"
 
 #include "mozilla/MemoryChecking.h"
+#include "mozilla/PodOperations.h"
+
+#include "jsutil.h"
+
 #include "js/Utility.h"
 
 using namespace js;
 
 static void*
 zlib_alloc(void* cx, uInt items, uInt size)
 {
     return js_calloc(items, size);
@@ -21,51 +25,59 @@ static void
 zlib_free(void* cx, void* addr)
 {
     js_free(addr);
 }
 
 Compressor::Compressor(const unsigned char* inp, size_t inplen)
     : inp(inp),
       inplen(inplen),
-      outbytes(0),
-      initialized(false)
+      initialized(false),
+      currentChunkSize(0),
+      chunkOffsets()
 {
     MOZ_ASSERT(inplen > 0);
     zs.opaque = nullptr;
     zs.next_in = (Bytef*)inp;
     zs.avail_in = 0;
     zs.next_out = nullptr;
     zs.avail_out = 0;
     zs.zalloc = zlib_alloc;
     zs.zfree = zlib_free;
+
+    // Reserve space for the CompressedDataHeader.
+    outbytes = sizeof(CompressedDataHeader);
 }
 
-
 Compressor::~Compressor()
 {
     if (initialized) {
         int ret = deflateEnd(&zs);
         if (ret != Z_OK) {
             // If we finished early, we can get a Z_DATA_ERROR.
             MOZ_ASSERT(ret == Z_DATA_ERROR);
             MOZ_ASSERT(uInt(zs.next_in - inp) < inplen || !zs.avail_out);
         }
     }
 }
 
+// According to the zlib docs, the default value for windowBits is 15. Passing
+// -15 is treated the same, but it also forces 'raw deflate' (no zlib header or
+// trailer). Raw deflate is necessary for chunked decompression.
+static const int WindowBits = -15;
+
 bool
 Compressor::init()
 {
     if (inplen >= UINT32_MAX)
         return false;
     // zlib is slow and we'd rather be done compression sooner
     // even if it means decompression is slower which penalizes
     // Function.toString()
-    int ret = deflateInit(&zs, Z_BEST_SPEED);
+    int ret = deflateInit2(&zs, Z_BEST_SPEED, Z_DEFLATED, WindowBits, 8, Z_DEFAULT_STRATEGY);
     if (ret != Z_OK) {
         MOZ_ASSERT(ret == Z_MEM_ERROR);
         return false;
     }
     initialized = true;
     return true;
 }
 
@@ -77,37 +89,88 @@ Compressor::setOutput(unsigned char* out
     zs.avail_out = outlen - outbytes;
 }
 
 Compressor::Status
 Compressor::compressMore()
 {
     MOZ_ASSERT(zs.next_out);
     uInt left = inplen - (zs.next_in - inp);
-    bool done = left <= CHUNKSIZE;
+    bool done = left <= MAX_INPUT_SIZE;
     if (done)
         zs.avail_in = left;
     else if (zs.avail_in == 0)
-        zs.avail_in = CHUNKSIZE;
+        zs.avail_in = MAX_INPUT_SIZE;
+
+    // Finish the current chunk if needed.
+    bool flush = false;
+    MOZ_ASSERT(currentChunkSize <= CHUNK_SIZE);
+    if (currentChunkSize + zs.avail_in >= CHUNK_SIZE) {
+        // Adjust avail_in, so we don't get chunks that are larger than
+        // CHUNK_SIZE.
+        zs.avail_in = CHUNK_SIZE - currentChunkSize;
+        MOZ_ASSERT(currentChunkSize + zs.avail_in == CHUNK_SIZE);
+        flush = true;
+    }
+
+    Bytef* oldin = zs.next_in;
     Bytef* oldout = zs.next_out;
-    int ret = deflate(&zs, done ? Z_FINISH : Z_NO_FLUSH);
+    int ret = deflate(&zs, done ? Z_FINISH : (flush ? Z_FULL_FLUSH : Z_NO_FLUSH));
     outbytes += zs.next_out - oldout;
+    currentChunkSize += zs.next_in - oldin;
+    MOZ_ASSERT(currentChunkSize <= CHUNK_SIZE);
+
     if (ret == Z_MEM_ERROR) {
         zs.avail_out = 0;
         return OOM;
     }
     if (ret == Z_BUF_ERROR || (done && ret == Z_OK)) {
         MOZ_ASSERT(zs.avail_out == 0);
         return MOREOUTPUT;
     }
+
+    if (done || currentChunkSize == CHUNK_SIZE) {
+        MOZ_ASSERT_IF(!done, flush);
+        MOZ_ASSERT(chunkSize(inplen, chunkOffsets.length()) == currentChunkSize);
+        if (!chunkOffsets.append(outbytes))
+            return OOM;
+        currentChunkSize = 0;
+        MOZ_ASSERT_IF(done, chunkOffsets.length() == (inplen - 1) / CHUNK_SIZE + 1);
+    }
+
     MOZ_ASSERT_IF(!done, ret == Z_OK);
     MOZ_ASSERT_IF(done, ret == Z_STREAM_END);
     return done ? DONE : CONTINUE;
 }
 
+size_t
+Compressor::totalBytesNeeded() const
+{
+    return AlignBytes(outbytes, sizeof(uint32_t)) + sizeOfChunkOffsets();
+}
+
+void
+Compressor::finish(char* dest, size_t destBytes) const
+{
+    MOZ_ASSERT(!chunkOffsets.empty());
+
+    CompressedDataHeader* compressedHeader = reinterpret_cast<CompressedDataHeader*>(dest);
+    compressedHeader->compressedBytes = outbytes;
+
+    size_t outbytesAligned = AlignBytes(outbytes, sizeof(uint32_t));
+
+    // Zero the padding bytes; the ImmutableStringsCache will hash them.
+    mozilla::PodZero(dest + outbytes, outbytesAligned - outbytes);
+
+    uint32_t* destArr = reinterpret_cast<uint32_t*>(dest + outbytesAligned);
+
+    MOZ_ASSERT(uintptr_t(dest + destBytes) == uintptr_t(destArr + chunkOffsets.length()));
+    mozilla::PodCopy(destArr, chunkOffsets.begin(), chunkOffsets.length());
+}
+
 bool
 js::DecompressString(const unsigned char* inp, size_t inplen, unsigned char* out, size_t outlen)
 {
     MOZ_ASSERT(inplen <= UINT32_MAX);
 
     // Mark the memory we pass to zlib as initialized for MSan.
     MOZ_MAKE_MEM_DEFINED(out, outlen);
 
@@ -126,8 +189,60 @@ js::DecompressString(const unsigned char
         return false;
     }
     ret = inflate(&zs, Z_FINISH);
     MOZ_ASSERT(ret == Z_STREAM_END);
     ret = inflateEnd(&zs);
     MOZ_ASSERT(ret == Z_OK);
     return true;
 }
+
+bool
+js::DecompressStringChunk(const unsigned char* inp, size_t chunk,
+                          unsigned char* out, size_t outlen)
+{
+    MOZ_ASSERT(outlen <= Compressor::CHUNK_SIZE);
+
+    const CompressedDataHeader* header = reinterpret_cast<const CompressedDataHeader*>(inp);
+
+    size_t compressedBytes = header->compressedBytes;
+    size_t compressedBytesAligned = AlignBytes(compressedBytes, sizeof(uint32_t));
+
+    const unsigned char* offsetBytes = inp + compressedBytesAligned;
+    const uint32_t* offsets = reinterpret_cast<const uint32_t*>(offsetBytes);
+
+    uint32_t compressedStart = chunk > 0 ? offsets[chunk - 1] : sizeof(CompressedDataHeader);
+    uint32_t compressedEnd = offsets[chunk];
+
+    MOZ_ASSERT(compressedStart < compressedEnd);
+    MOZ_ASSERT(compressedEnd <= compressedBytes);
+
+    bool lastChunk = compressedEnd == compressedBytes;
+
+    // Mark the memory we pass to zlib as initialized for MSan.
+    MOZ_MAKE_MEM_DEFINED(out, outlen);
+
+    z_stream zs;
+    zs.zalloc = zlib_alloc;
+    zs.zfree = zlib_free;
+    zs.opaque = nullptr;
+    zs.next_in = (Bytef*)(inp + compressedStart);
+    zs.avail_in = compressedEnd - compressedStart;
+    zs.next_out = out;
+    MOZ_ASSERT(outlen);
+    zs.avail_out = outlen;
+    int ret = inflateInit2(&zs, WindowBits);
+    if (ret != Z_OK) {
+        MOZ_ASSERT(ret == Z_MEM_ERROR);
+        return false;
+    }
+    if (lastChunk) {
+        ret = inflate(&zs, Z_FINISH);
+        MOZ_ASSERT(ret == Z_STREAM_END);
+    } else {
+        ret = inflate(&zs, Z_NO_FLUSH);
+        MOZ_ASSERT(ret == Z_OK);
+    }
+    MOZ_ASSERT(zs.avail_in == 0);
+    ret = inflateEnd(&zs);
+    MOZ_ASSERT(ret == Z_OK);
+    return true;
+}
--- a/js/src/vm/Compression.h
+++ b/js/src/vm/Compression.h
@@ -4,49 +4,100 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef vm_Compression_h
 #define vm_Compression_h
 
 #include <zlib.h>
 
+#include "jsalloc.h"
 #include "jstypes.h"
 
+#include "js/Vector.h"
+
 namespace js {
 
+struct CompressedDataHeader
+{
+    uint32_t compressedBytes;
+};
+
 class Compressor
 {
-    /* Number of bytes we should hand to zlib each compressMore() call. */
-    static const size_t CHUNKSIZE = 2048;
+  public:
+    // After compressing CHUNK_SIZE bytes, we will do a full flush so we can
+    // start decompression at that point.
+    static const size_t CHUNK_SIZE = 64 * 1024;
+
+  private:
+    // Number of bytes we should hand to zlib each compressMore() call.
+    static const size_t MAX_INPUT_SIZE = 2 * 1024;
+
     z_stream zs;
     const unsigned char* inp;
     size_t inplen;
     size_t outbytes;
     bool initialized;
 
+    // The number of uncompressed bytes written for the current chunk. When this
+    // reaches CHUNK_SIZE, we finish the current chunk and start a new chunk.
+    uint32_t currentChunkSize;
+
+    // At the end of each chunk (and the end of the uncompressed data if it's
+    // not a chunk boundary), we record the offset in the compressed data.
+    js::Vector<uint32_t, 8, SystemAllocPolicy> chunkOffsets;
+
   public:
     enum Status {
         MOREOUTPUT,
         DONE,
         CONTINUE,
         OOM
     };
 
     Compressor(const unsigned char* inp, size_t inplen);
     ~Compressor();
     bool init();
     void setOutput(unsigned char* out, size_t outlen);
-    size_t outWritten() const { return outbytes; }
     /* Compress some of the input. Return true if it should be called again. */
     Status compressMore();
+    size_t sizeOfChunkOffsets() const { return chunkOffsets.length() * sizeof(chunkOffsets[0]); }
+
+    // Returns the number of bytes needed to store the data currently written +
+    // the chunk offsets.
+    size_t totalBytesNeeded() const;
+
+    // Append the chunk offsets to |dest|.
+    void finish(char* dest, size_t destBytes) const;
+
+    static void toChunkOffset(size_t uncompressedOffset, size_t* chunk, size_t* chunkOffset) {
+        *chunk = uncompressedOffset / CHUNK_SIZE;
+        *chunkOffset = uncompressedOffset % CHUNK_SIZE;
+    }
+    static size_t chunkSize(size_t uncompressedBytes, size_t chunk) {
+        MOZ_ASSERT(uncompressedBytes > 0);
+        size_t lastChunk = (uncompressedBytes - 1) / CHUNK_SIZE;
+        MOZ_ASSERT(chunk <= lastChunk);
+        if (chunk < lastChunk || uncompressedBytes % CHUNK_SIZE == 0)
+            return CHUNK_SIZE;
+        return uncompressedBytes % CHUNK_SIZE;
+    }
 };
 
 /*
  * Decompress a string. The caller must know the length of the output and
  * allocate |out| to a string of that length.
  */
 bool DecompressString(const unsigned char* inp, size_t inplen,
                       unsigned char* out, size_t outlen);
 
+/*
+ * Decompress a single chunk of at most Compressor::CHUNK_SIZE bytes.
+ * |chunk| is the chunk index. The caller must know the length of the output
+ * (the uncompressed chunk) and allocate |out| to a string of that length.
+ */
+bool DecompressStringChunk(const unsigned char* inp, size_t chunk,
+                           unsigned char* out, size_t outlen);
+
 } /* namespace js */
 
 #endif /* vm_Compression_h */