Bug 1506969. r=tcampbell
author: Jeff Walden <jwalden@mit.edu>
Mon, 26 Nov 2018 13:12:29 -0800
changeset 508154 b26e28ce8cfebb91f1c5764606585b8b670938ff
parent 508153 203424f500da8a1f759ee8be89d3274567926a3f
child 508155 2535c232ed6e9109ae790d3b795b6f746505d5c8
push id1905
push user: ffxbld-merge
push date: Mon, 21 Jan 2019 12:33:13 +0000
treeherder: mozilla-release@c2fca1944d8c [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: tcampbell
bugs: 1506969
milestone: 65.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1506969. r=tcampbell
js/src/vm/Compression.h
js/src/vm/JSScript.cpp
--- a/js/src/vm/Compression.h
+++ b/js/src/vm/Compression.h
@@ -65,19 +65,30 @@ class Compressor
 
     // Returns the number of bytes needed to store the data currently written +
     // the chunk offsets.
     size_t totalBytesNeeded() const;
 
     // Append the chunk offsets to |dest|.
     void finish(char* dest, size_t destBytes);
 
-    static void toChunkOffset(size_t uncompressedOffset, size_t* chunk, size_t* chunkOffset) {
-        *chunk = uncompressedOffset / CHUNK_SIZE;
-        *chunkOffset = uncompressedOffset % CHUNK_SIZE;
+    static void
+    rangeToChunkAndOffset(size_t uncompressedStart, size_t uncompressedLimit,
+                          size_t* firstChunk, size_t* firstChunkOffset, size_t* firstChunkSize,
+                          size_t* lastChunk, size_t* lastChunkSize)
+    {
+        *firstChunk = uncompressedStart / CHUNK_SIZE;
+        *firstChunkOffset = uncompressedStart % CHUNK_SIZE;
+        *firstChunkSize = CHUNK_SIZE - *firstChunkOffset;
+
+        MOZ_ASSERT(uncompressedStart < uncompressedLimit,
+                   "subtraction below requires a non-empty range");
+
+        *lastChunk = (uncompressedLimit - 1) / CHUNK_SIZE;
+        *lastChunkSize = ((uncompressedLimit - 1) % CHUNK_SIZE) + 1;
     }
 
     static size_t chunkSize(size_t uncompressedBytes, size_t chunk) {
         MOZ_ASSERT(uncompressedBytes > 0,
                    "must have uncompressed data to chunk");
 
         size_t startOfChunkBytes = chunk * CHUNK_SIZE;
         MOZ_ASSERT(startOfChunkBytes < uncompressedBytes,
--- a/js/src/vm/JSScript.cpp
+++ b/js/src/vm/JSScript.cpp
@@ -1698,70 +1698,87 @@ ScriptSource::units(JSContext* cx, Uncom
     }
 
     if (data.is<Missing>()) {
         MOZ_CRASH("ScriptSource::units() on ScriptSource with SourceType = Missing");
     }
 
     MOZ_ASSERT(data.is<Compressed<Unit>>());
 
-    // Determine which chunk(s) we are interested in, and the offsets within
-    // these chunks.
-    size_t firstChunk, lastChunk;
-    size_t firstChunkOffset, lastChunkOffset;
-    MOZ_ASSERT(len > 0);
-    Compressor::toChunkOffset(begin * sizeof(Unit), &firstChunk, &firstChunkOffset);
-    Compressor::toChunkOffset((begin + len) * sizeof(Unit), &lastChunk, &lastChunkOffset);
-
+    // Determine first/last chunks, the offset (in bytes) into the first chunk
+    // of the requested units, and the number of bytes in the last chunk.
+    //
+    // Note that first and last chunk sizes are miscomputed and *must not be
+    // used* when the first chunk is the last chunk.
+    size_t firstChunk, firstChunkOffset, firstChunkSize;
+    size_t lastChunk, lastChunkSize;
+    Compressor::rangeToChunkAndOffset(begin * sizeof(Unit), (begin + len) * sizeof(Unit),
+                                      &firstChunk, &firstChunkOffset, &firstChunkSize,
+                                      &lastChunk, &lastChunkSize);
+    MOZ_ASSERT(firstChunk <= lastChunk);
     MOZ_ASSERT(firstChunkOffset % sizeof(Unit) == 0);
+    MOZ_ASSERT(firstChunkSize % sizeof(Unit) == 0);
+
     size_t firstUnit = firstChunkOffset / sizeof(Unit);
 
+    // Directly return units within a single chunk.  UncompressedSourceCache
+    // and |holder| will hold the units alive past function return.
     if (firstChunk == lastChunk) {
         const Unit* units = chunkUnits<Unit>(cx, holder, firstChunk);
         if (!units) {
             return nullptr;
         }
 
         return units + firstUnit;
     }
 
-    // We need multiple chunks. Allocate a buffer to hold |len| units and copy
-    // uncompressed units from the chunks into it.  We use chunkUnits() so we
-    // benefit from chunk caching by UncompressedSourceCache.
-
-    MOZ_ASSERT(firstChunk < lastChunk);
-
+    // Otherwise the units span multiple chunks.  Copy successive chunks'
+    // decompressed units into freshly-allocated memory to return.
     EntryUnits<Unit> decompressed(js_pod_malloc<Unit>(len));
     if (!decompressed) {
         JS_ReportOutOfMemory(cx);
         return nullptr;
     }
 
-    size_t totalLengthInBytes = length() * sizeof(Unit);
-    Unit* cursor = decompressed.get();
-
-    for (size_t i = firstChunk; i <= lastChunk; i++) {
+    Unit* cursor;
+
+    {
+        // |AutoHoldEntry| is single-shot, and a holder successfully filled in
+        // by |chunkUnits| must be destroyed before another can be used.  Thus
+        // we can't use |holder| with |chunkUnits| when |chunkUnits| is used
+        // with multiple chunks, and we must use and destroy distinct, fresh
+        // holders for each chunk.
+        UncompressedSourceCache::AutoHoldEntry firstHolder;
+        const Unit* units = chunkUnits<Unit>(cx, firstHolder, firstChunk);
+        if (!units) {
+            return nullptr;
+        }
+
+        cursor = std::copy_n(units + firstUnit, firstChunkSize / sizeof(Unit),
+                             decompressed.get());
+    }
+
+    for (size_t i = firstChunk + 1; i < lastChunk; i++) {
         UncompressedSourceCache::AutoHoldEntry chunkHolder;
         const Unit* units = chunkUnits<Unit>(cx, chunkHolder, i);
         if (!units) {
             return nullptr;
         }
 
-        size_t numUnits = Compressor::chunkSize(totalLengthInBytes, i) / sizeof(Unit);
-        if (i == firstChunk) {
-            MOZ_ASSERT(firstUnit < numUnits);
-            units += firstUnit;
-            numUnits -= firstUnit;
-        } else if (i == lastChunk) {
-            size_t numUnitsNew = lastChunkOffset / sizeof(Unit);
-            MOZ_ASSERT(numUnitsNew <= numUnits);
-            numUnits = numUnitsNew;
+        cursor = std::copy_n(units, Compressor::CHUNK_SIZE / sizeof(Unit), cursor);
+    }
+
+    {
+        UncompressedSourceCache::AutoHoldEntry lastHolder;
+        const Unit* units = chunkUnits<Unit>(cx, lastHolder, lastChunk);
+        if (!units) {
+            return nullptr;
         }
-        mozilla::PodCopy(cursor, units, numUnits);
-        cursor += numUnits;
+
+        cursor = std::copy_n(units, lastChunkSize / sizeof(Unit), cursor);
     }
 
     MOZ_ASSERT(PointerRangeSize(decompressed.get(), cursor) == len);
 
     // Transfer ownership to |holder|.
     const Unit* ret = decompressed.get();
     holder.holdUnits(std::move(decompressed));
     return ret;