Merge inbound to mozilla-central. a=merge PRE_TREEWIDE_CLANG_FORMAT
author      Noemi Erli <nerli@mozilla.com>
date        Fri, 30 Nov 2018 11:35:05 +0200
changeset   448945 58a0412e1557
parent      448936 c5b713000513 (current diff)
parent      448944 8e424cc86558 (diff)
child       448946 2ac59ec6f8de
child       448955 9ae8864b61d0
push id     35130
push user   nerli@mozilla.com
push date   Fri, 30 Nov 2018 09:35:34 +0000
treeherder  mozilla-central@58a0412e1557
reviewers   merge
milestone   65.0a1
first release with: nightly 65.0a1 / 58a0412e1557 / build 20181130093534 (linux32, linux64, mac, win32, win64)
Merge inbound to mozilla-central. a=merge
.clang-format-ignore
.hg-annotate-ignore-revs
accessible/generic/RootAccessible.cpp
js/src/gdb/lib-for-tests/prologue.py
js/src/vm/Compression.h
js/src/vm/JSScript.cpp
tools/rewriting/ThirdPartyPaths.txt
--- a/.clang-format-ignore
+++ b/.clang-format-ignore
@@ -178,15 +178,16 @@ third_party/aom/.*
 third_party/msgpack/.*
 third_party/prio/.*
 third_party/python/.*
 third_party/rust/.*
 toolkit/components/jsoncpp/.*
 toolkit/components/protobuf/.*
 toolkit/components/url-classifier/chromium/.*
 toolkit/components/url-classifier/protobuf/.*
+toolkit/crashreporter/breakpad-client/.*
 toolkit/crashreporter/google-breakpad/.*
 toolkit/recordreplay/udis86/.*
 tools/fuzzing/libfuzzer/.*
 tools/profiler/core/vtune/.*
 xpcom/build/mach_override.c
 xpcom/build/mach_override.h
 xpcom/io/crc32c.c
new file mode 100644
--- /dev/null
+++ b/.hg-annotate-ignore-revs
@@ -0,0 +1,1 @@
+0ceae9db9ec0be18daa1a279511ad305723185d4 Bug 1204606 - Sylvestre Ledru - Reformat of dom/media. r=jya
--- a/accessible/generic/RootAccessible.cpp
+++ b/accessible/generic/RootAccessible.cpp
@@ -269,16 +269,21 @@ RootAccessible::ProcessDOMEvent(Event* a
   nsAutoString eventType;
   aDOMEvent->GetType(eventType);
 
 #ifdef A11Y_LOG
   if (logging::IsEnabled(logging::eDOMEvents))
     logging::DOMEvent("processed", origTargetNode, eventType);
 #endif
 
+  if (!origTargetNode) {
+    // Original target has ceased to exist.
+    return;
+  }
+
   if (eventType.EqualsLiteral("popuphiding")) {
     HandlePopupHidingEvent(origTargetNode);
     return;
   }
 
   DocAccessible* targetDocument = GetAccService()->
     GetDocAccessible(origTargetNode->OwnerDoc());
   if (!targetDocument) {
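
The new early return above guarantees origTargetNode is non-null before the popuphiding handling and the origTargetNode->OwnerDoc() call that follow. A minimal standalone sketch of the guard pattern, using hypothetical Node and Document types in place of the real Gecko classes:

    #include <cstdio>

    // Hypothetical stand-ins for the real Gecko types.
    struct Document {};
    struct Node {
      Document* ownerDoc = nullptr;
      Document* OwnerDoc() { return ownerDoc; }
    };

    void ProcessEvent(Node* origTargetNode) {
      if (!origTargetNode) {
        // Original target has ceased to exist; nothing left to process.
        return;
      }
      // Safe: origTargetNode was checked above before this dereference.
      Document* doc = origTargetNode->OwnerDoc();
      std::printf("owner doc: %p\n", static_cast<void*>(doc));
    }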
--- a/js/src/gdb/lib-for-tests/prologue.py
+++ b/js/src/gdb/lib-for-tests/prologue.py
@@ -9,17 +9,17 @@ import traceback
 sys.path[0:0] = [testlibdir]
 
 active_fragment = None
 
 # Run the C++ fragment named |fragment|, stopping on entry to |function|
 # ('breakpoint', by default) and then select the calling frame.
 
 
-def run_fragment(fragment, function='breakpoint'):
+def run_fragment(fragment, function='gdb-tests.cpp:breakpoint'):
     # Arrange to stop at a reasonable place in the test program.
     bp = gdb.Breakpoint(function)
     try:
         gdb.execute("run %s" % (fragment,))
         # Check that we did indeed stop by hitting the breakpoint we set.
         assert bp.hit_count == 1
     finally:
         bp.delete()
--- a/js/src/vm/Compression.h
+++ b/js/src/vm/Compression.h
@@ -65,19 +65,30 @@ class Compressor
 
     // Returns the number of bytes needed to store the data currently written +
     // the chunk offsets.
     size_t totalBytesNeeded() const;
 
     // Append the chunk offsets to |dest|.
     void finish(char* dest, size_t destBytes);
 
-    static void toChunkOffset(size_t uncompressedOffset, size_t* chunk, size_t* chunkOffset) {
-        *chunk = uncompressedOffset / CHUNK_SIZE;
-        *chunkOffset = uncompressedOffset % CHUNK_SIZE;
+    static void
+    rangeToChunkAndOffset(size_t uncompressedStart, size_t uncompressedLimit,
+                          size_t* firstChunk, size_t* firstChunkOffset, size_t* firstChunkSize,
+                          size_t* lastChunk, size_t* lastChunkSize)
+    {
+        *firstChunk = uncompressedStart / CHUNK_SIZE;
+        *firstChunkOffset = uncompressedStart % CHUNK_SIZE;
+        *firstChunkSize = CHUNK_SIZE - *firstChunkOffset;
+
+        MOZ_ASSERT(uncompressedStart < uncompressedLimit,
+                   "subtraction below requires a non-empty range");
+
+        *lastChunk = (uncompressedLimit - 1) / CHUNK_SIZE;
+        *lastChunkSize = ((uncompressedLimit - 1) % CHUNK_SIZE) + 1;
     }
 
     static size_t chunkSize(size_t uncompressedBytes, size_t chunk) {
         MOZ_ASSERT(uncompressedBytes > 0,
                    "must have uncompressed data to chunk");
 
         size_t startOfChunkBytes = chunk * CHUNK_SIZE;
         MOZ_ASSERT(startOfChunkBytes < uncompressedBytes,
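
The new rangeToChunkAndOffset() maps a byte range [start, limit) onto chunk coordinates: integer division locates the chunks, and the limit is decremented first so an exactly chunk-aligned limit does not spill into a following, untouched chunk. A worked, self-contained sketch of the arithmetic; the 64 KiB CHUNK_SIZE is an assumption, since the constant's definition lies outside this hunk:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Assumed chunk size; the real Compressor::CHUNK_SIZE is not
    // visible in this hunk.
    static const size_t CHUNK_SIZE = 64 * 1024;

    static void rangeToChunkAndOffset(size_t start, size_t limit,
                                      size_t* firstChunk,
                                      size_t* firstChunkOffset,
                                      size_t* firstChunkSize,
                                      size_t* lastChunk,
                                      size_t* lastChunkSize) {
      // Mirrors the patch's MOZ_ASSERT: with limit == start == 0, the
      // (limit - 1) below would underflow.
      assert(start < limit && "subtraction below requires a non-empty range");
      *firstChunk = start / CHUNK_SIZE;
      *firstChunkOffset = start % CHUNK_SIZE;
      *firstChunkSize = CHUNK_SIZE - *firstChunkOffset;
      *lastChunk = (limit - 1) / CHUNK_SIZE;
      *lastChunkSize = ((limit - 1) % CHUNK_SIZE) + 1;
    }

    int main() {
      // Bytes [100000, 200000): spans chunk 1 (starting at 65536)
      // through chunk 3 (starting at 196608).
      size_t fc, fo, fs, lc, ls;
      rangeToChunkAndOffset(100000, 200000, &fc, &fo, &fs, &lc, &ls);
      std::printf("first chunk %zu, offset %zu, size %zu\n", fc, fo, fs);
      std::printf("last chunk %zu, size %zu\n", lc, ls);
      // Prints: first chunk 1, offset 34464, size 31072
      //         last chunk 3, size 3392
      return 0;
    }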
--- a/js/src/vm/JSScript.cpp
+++ b/js/src/vm/JSScript.cpp
@@ -1698,70 +1698,87 @@ ScriptSource::units(JSContext* cx, Uncom
     }
 
     if (data.is<Missing>()) {
         MOZ_CRASH("ScriptSource::units() on ScriptSource with SourceType = Missing");
     }
 
     MOZ_ASSERT(data.is<Compressed<Unit>>());
 
-    // Determine which chunk(s) we are interested in, and the offsets within
-    // these chunks.
-    size_t firstChunk, lastChunk;
-    size_t firstChunkOffset, lastChunkOffset;
-    MOZ_ASSERT(len > 0);
-    Compressor::toChunkOffset(begin * sizeof(Unit), &firstChunk, &firstChunkOffset);
-    Compressor::toChunkOffset((begin + len) * sizeof(Unit), &lastChunk, &lastChunkOffset);
-
+    // Determine first/last chunks, the offset (in bytes) into the first chunk
+    // of the requested units, and the number of bytes in the last chunk.
+    //
+    // Note that first and last chunk sizes are miscomputed and *must not be
+    // used* when the first chunk is the last chunk.
+    size_t firstChunk, firstChunkOffset, firstChunkSize;
+    size_t lastChunk, lastChunkSize;
+    Compressor::rangeToChunkAndOffset(begin * sizeof(Unit), (begin + len) * sizeof(Unit),
+                                      &firstChunk, &firstChunkOffset, &firstChunkSize,
+                                      &lastChunk, &lastChunkSize);
+    MOZ_ASSERT(firstChunk <= lastChunk);
     MOZ_ASSERT(firstChunkOffset % sizeof(Unit) == 0);
+    MOZ_ASSERT(firstChunkSize % sizeof(Unit) == 0);
+
     size_t firstUnit = firstChunkOffset / sizeof(Unit);
 
+    // Directly return units within a single chunk.  UncompressedSourceCache
+    // and |holder| will hold the units alive past function return.
     if (firstChunk == lastChunk) {
         const Unit* units = chunkUnits<Unit>(cx, holder, firstChunk);
         if (!units) {
             return nullptr;
         }
 
         return units + firstUnit;
     }
 
-    // We need multiple chunks. Allocate a buffer to hold |len| units and copy
-    // uncompressed units from the chunks into it.  We use chunkUnits() so we
-    // benefit from chunk caching by UncompressedSourceCache.
-
-    MOZ_ASSERT(firstChunk < lastChunk);
-
+    // Otherwise the units span multiple chunks.  Copy successive chunks'
+    // decompressed units into freshly-allocated memory to return.
     EntryUnits<Unit> decompressed(js_pod_malloc<Unit>(len));
     if (!decompressed) {
         JS_ReportOutOfMemory(cx);
         return nullptr;
     }
 
-    size_t totalLengthInBytes = length() * sizeof(Unit);
-    Unit* cursor = decompressed.get();
-
-    for (size_t i = firstChunk; i <= lastChunk; i++) {
+    Unit* cursor;
+
+    {
+        // |AutoHoldEntry| is single-shot, and a holder successfully filled in
+        // by |chunkUnits| must be destroyed before another can be used.  Thus
+        // we can't use |holder| with |chunkUnits| when |chunkUnits| is used
+        // with multiple chunks, and we must use and destroy distinct, fresh
+        // holders for each chunk.
+        UncompressedSourceCache::AutoHoldEntry firstHolder;
+        const Unit* units = chunkUnits<Unit>(cx, firstHolder, firstChunk);
+        if (!units) {
+            return nullptr;
+        }
+
+        cursor = std::copy_n(units + firstUnit, firstChunkSize / sizeof(Unit),
+                             decompressed.get());
+    }
+
+    for (size_t i = firstChunk + 1; i < lastChunk; i++) {
         UncompressedSourceCache::AutoHoldEntry chunkHolder;
         const Unit* units = chunkUnits<Unit>(cx, chunkHolder, i);
         if (!units) {
             return nullptr;
         }
 
-        size_t numUnits = Compressor::chunkSize(totalLengthInBytes, i) / sizeof(Unit);
-        if (i == firstChunk) {
-            MOZ_ASSERT(firstUnit < numUnits);
-            units += firstUnit;
-            numUnits -= firstUnit;
-        } else if (i == lastChunk) {
-            size_t numUnitsNew = lastChunkOffset / sizeof(Unit);
-            MOZ_ASSERT(numUnitsNew <= numUnits);
-            numUnits = numUnitsNew;
+        cursor = std::copy_n(units, Compressor::CHUNK_SIZE / sizeof(Unit), cursor);
+    }
+
+    {
+        UncompressedSourceCache::AutoHoldEntry lastHolder;
+        const Unit* units = chunkUnits<Unit>(cx, lastHolder, lastChunk);
+        if (!units) {
+            return nullptr;
         }
-        mozilla::PodCopy(cursor, units, numUnits);
-        cursor += numUnits;
+
+        cursor = std::copy_n(units, lastChunkSize / sizeof(Unit), cursor);
     }
 
     MOZ_ASSERT(PointerRangeSize(decompressed.get(), cursor) == len);
 
     // Transfer ownership to |holder|.
     const Unit* ret = decompressed.get();
     holder.holdUnits(std::move(decompressed));
     return ret;
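
The rewritten copy splits into three phases: a partial first chunk starting at firstUnit, whole middle chunks, and a partial last chunk of lastChunkSize bytes, with a fresh single-shot AutoHoldEntry per chunkUnits() call. A simplified sketch of that three-phase shape; getChunk() is a hypothetical stand-in for chunkUnits() and its cache-holder machinery, and the sketch assumes firstChunk < lastChunk, matching the multi-chunk path above (the patch's own comment notes the first/last sizes are unusable when the two coincide):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static const size_t CHUNK_SIZE = 8;  // tiny, for illustration only

    // Hypothetical stand-in for chunkUnits(): returns chunk |i| of a
    // backing store, here just a sliced view of one vector.
    static const char* getChunk(const std::vector<char>& src, size_t i) {
      return src.data() + i * CHUNK_SIZE;
    }

    int main() {
      std::vector<char> src(40);
      for (size_t i = 0; i < src.size(); i++) src[i] = 'a' + (i % 26);

      // Copy units [begin, begin + len), spanning several chunks.
      size_t begin = 5, len = 20;
      size_t firstChunk = begin / CHUNK_SIZE;
      size_t firstOffset = begin % CHUNK_SIZE;
      size_t firstSize = CHUNK_SIZE - firstOffset;
      size_t lastChunk = (begin + len - 1) / CHUNK_SIZE;
      size_t lastSize = ((begin + len - 1) % CHUNK_SIZE) + 1;

      std::vector<char> out(len);
      // First chunk: skip firstOffset units, copy the remainder.
      char* cursor = std::copy_n(getChunk(src, firstChunk) + firstOffset,
                                 firstSize, out.data());
      // Middle chunks are copied whole.
      for (size_t i = firstChunk + 1; i < lastChunk; i++)
        cursor = std::copy_n(getChunk(src, i), CHUNK_SIZE, cursor);
      // Last chunk: only its first lastSize units belong to the range.
      cursor = std::copy_n(getChunk(src, lastChunk), lastSize, cursor);

      std::printf("%.*s\n", int(len), out.data());  // fghijklmnopqrstuvwxy
      return 0;
    }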
--- a/tools/rewriting/ThirdPartyPaths.txt
+++ b/tools/rewriting/ThirdPartyPaths.txt
@@ -105,15 +105,16 @@ third_party/aom/
 third_party/msgpack/
 third_party/prio/
 third_party/python/
 third_party/rust/
 toolkit/components/jsoncpp/
 toolkit/components/protobuf/
 toolkit/components/url-classifier/chromium/
 toolkit/components/url-classifier/protobuf/
+toolkit/crashreporter/breakpad-client/
 toolkit/crashreporter/google-breakpad/
 toolkit/recordreplay/udis86/
 tools/fuzzing/libfuzzer/
 tools/profiler/core/vtune/
 xpcom/build/mach_override.c
 xpcom/build/mach_override.h
 xpcom/io/crc32c.c