Bug 1338374 - Shrink Vector from (usually) four pointers in size to three when no inline storage is used. r=froydnj
author Jeff Walden <jwalden@mit.edu>
Mon, 30 Jan 2017 15:56:05 -0800
changeset 344352 37acaeb307f19a98e37afc5c8ecfbc567abd42f4
parent 344351 9f6b33bc6c2b2d422161dfee5d9dabfa34277ecd
child 344353 5ddf5a0b85ae860b8c9616f516980e545ac358cc
push id 87335
push user jwalden@mit.edu
push date Wed, 22 Feb 2017 23:20:21 +0000
treeherder mozilla-inbound@5ddf5a0b85ae
reviewers froydnj
bugs 1338374
milestone 54.0a1
mfbt/Vector.h
mfbt/tests/TestVector.cpp
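
The change below shrinks Vector by folding mCapacity, the debug-only
mReserved, and the inline storage buffer into a single trailing member,
mTail. As a minimal standalone sketch of the layout trick -- the names
mirror the patch, but this is an illustration rather than the mfbt sources,
and it assumes size_t and pointers are the same size, as on common
platforms:

#include <stddef.h>

// Capacity (and, in debug builds, the reserved count) go in a base struct.
struct CapacityAndReserved
{
  size_t mCapacity;
#ifdef DEBUG
  size_t mReserved;
#endif
};

// The inline buffer goes in a derived struct.  The zero-capacity partial
// specialization declares no array at all, so the derived struct is exactly
// the size of its base -- unlike a separate empty member, which would still
// cost at least one (typically padded-to-a-word) byte.
template<typename T, size_t Capacity>
struct CRAndStorage : CapacityAndReserved
{
  alignas(T) unsigned char mBytes[Capacity * sizeof(T)];
  void* data() { return mBytes; }
  T* storage() { return static_cast<T*>(data()); }
};

template<typename T>
struct CRAndStorage<T, 0> : CapacityAndReserved
{
  T* storage() { return nullptr; }
};

// Model of the resulting Vector layout.
template<typename T, size_t N>
struct VectorModel
{
  T* mBegin;
  size_t mLength;
  CRAndStorage<T, N> mTail;
};

#ifndef DEBUG
static_assert(sizeof(VectorModel<int, 0>) == 3 * sizeof(void*),
              "three pointer-sized words when there is no inline storage");
#endif

The Dummy template parameter in the actual patch exists only because a
member template can't be fully specialized inside its enclosing class; at
namespace scope, as here, a plain partial specialization on Capacity == 0
suffices.
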
--- a/mfbt/Vector.h
+++ b/mfbt/Vector.h
@@ -142,17 +142,17 @@ struct VectorImpl
     T* src = aV.beginNoCheck();
     for (; src < aV.endNoCheck(); ++dst, ++src) {
       new_(dst, Move(*src));
     }
     VectorImpl::destroy(aV.beginNoCheck(), aV.endNoCheck());
     aV.free_(aV.mBegin);
     aV.mBegin = newbuf;
     /* aV.mLength is unchanged. */
-    aV.mCapacity = aNewCap;
+    aV.mTail.mCapacity = aNewCap;
     return true;
   }
 };
 
 /*
  * This partial template specialization provides a default implementation for
  * vector operations when the element type is known to be a POD, as judged by
  * IsPod.
@@ -221,38 +221,40 @@ struct VectorImpl<T, N, AP, true>
     }
   }
 
   static inline MOZ_MUST_USE bool
   growTo(Vector<T, N, AP>& aV, size_t aNewCap)
   {
     MOZ_ASSERT(!aV.usingInlineStorage());
     MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
-    T* newbuf = aV.template pod_realloc<T>(aV.mBegin, aV.mCapacity, aNewCap);
+    T* newbuf =
+      aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aNewCap);
     if (MOZ_UNLIKELY(!newbuf)) {
       return false;
     }
     aV.mBegin = newbuf;
     /* aV.mLength is unchanged. */
-    aV.mCapacity = aNewCap;
+    aV.mTail.mCapacity = aNewCap;
     return true;
   }
 
   static inline void
   podResizeToFit(Vector<T, N, AP>& aV)
   {
-    if (aV.usingInlineStorage() || aV.mLength == aV.mCapacity) {
+    if (aV.usingInlineStorage() || aV.mLength == aV.mTail.mCapacity) {
       return;
     }
-    T* newbuf = aV.template pod_realloc<T>(aV.mBegin, aV.mCapacity, aV.mLength);
+    T* newbuf =
+      aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aV.mLength);
     if (MOZ_UNLIKELY(!newbuf)) {
       return;
     }
     aV.mBegin = newbuf;
-    aV.mCapacity = aV.mLength;
+    aV.mTail.mCapacity = aV.mLength;
   }
 };
 
 // A struct for TestVector.cpp to access private internal fields.
 // DO NOT DEFINE IN YOUR OWN CODE.
 struct VectorTesting;
 
 } // namespace detail
@@ -338,68 +340,110 @@ class MOZ_NON_PARAM Vector final : priva
    * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
    * previously allocated by a call to reserve().
    */
   T* mBegin;
 
   /* Number of elements in the vector. */
   size_t mLength;
 
-  /* Max number of elements storable in the vector without resizing. */
-  size_t mCapacity;
+  /*
+   * Memory used to store capacity, reserved element count (debug builds only),
+   * and inline storage.  The simple "answer" is:
+   *
+   *   size_t mCapacity;
+   *   #ifdef DEBUG
+   *   size_t mReserved;
+   *   #endif
+   *   alignas(T) unsigned char mBytes[kInlineCapacity * sizeof(T)];
+   *
+   * but there are complications.  First, C++ forbids zero-sized arrays that
+   * might result.  Second, we don't want zero capacity to affect Vector's size
+   * (even empty classes take up a byte, unless they're base classes).
+   *
+   * Yet again, we eliminate the zero-sized array using partial specialization.
+   * And we eliminate the potential size hit by putting capacity/reserved in one
+   * struct, then putting the array (if any) in a derived struct.  If no array
+   * is needed, the derived struct won't consume extra space.
+   */
+  struct CapacityAndReserved
+  {
+    explicit CapacityAndReserved(size_t aCapacity, size_t aReserved)
+      : mCapacity(aCapacity)
+#ifdef DEBUG
+      , mReserved(aReserved)
+#endif
+    {}
+    CapacityAndReserved() = default;
+
+    /* Max number of elements storable in the vector without resizing. */
+    size_t mCapacity;
 
 #ifdef DEBUG
-  /* Max elements of reserved or used space in this vector. */
-  size_t mReserved;
+    /* Max elements of reserved or used space in this vector. */
+    size_t mReserved;
 #endif
+  };
 
-  /*
-   * Memory used for inline storage.  We want basically this:
-   *
-   *   alignas(T) unsigned char storage[kInlineCapacity * sizeof(T)];
-   *
-   * but C++ forbids zero-sized arrays that might result if we did this.  We fix
-   * this by (again) using partial specialization, defining an array only if
-   * contains at least one element.
-   */
+// Silence warnings about this struct possibly being padded due to the
+// alignas() in it -- there's nothing we can do to avoid it.
+#ifdef _MSC_VER
+#  pragma warning(push)
+#  pragma warning(disable:4324)
+#endif // _MSC_VER
+
   template<size_t Capacity, size_t Dummy>
-  struct InlineStorage
+  struct CRAndStorage : CapacityAndReserved
   {
+    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
+      : CapacityAndReserved(aCapacity, aReserved)
+    {}
+    CRAndStorage() = default;
+
     alignas(T) unsigned char mBytes[Capacity * sizeof(T)];
 
     // GCC fails due to -Werror=strict-aliasing if |mBytes| is directly cast to
     // T*.  Indirecting through this function addresses the problem.
     void* data() { return mBytes; }
 
-    T* addr() { return static_cast<T*>(data()); }
+    T* storage() { return static_cast<T*>(data()); }
   };
 
   template<size_t Dummy>
-  struct InlineStorage<0, Dummy>
+  struct CRAndStorage<0, Dummy> : CapacityAndReserved
   {
-    T* addr() { return nullptr; }
+    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
+      : CapacityAndReserved(aCapacity, aReserved)
+    {}
+    CRAndStorage() = default;
+
+    T* storage() { return nullptr; }
   };
 
-  InlineStorage<kInlineCapacity, 0> mStorage;
+  CRAndStorage<kInlineCapacity, 0> mTail;
+
+#ifdef _MSC_VER
+#  pragma warning(pop)
+#endif // _MSC_VER
 
 #ifdef DEBUG
   friend class ReentrancyGuard;
   bool mEntered;
 #endif
 
   /* private accessors */
 
   bool usingInlineStorage() const
   {
     return mBegin == const_cast<Vector*>(this)->inlineStorage();
   }
 
   T* inlineStorage()
   {
-    return mStorage.addr();
+    return mTail.storage();
   }
 
   T* beginNoCheck() const
   {
     return mBegin;
   }
 
   T* endNoCheck()
@@ -417,19 +461,19 @@ class MOZ_NON_PARAM Vector final : priva
    * The amount of explicitly allocated space in this vector that is immediately
    * available to be filled by appending additional elements.  This value is
    * always greater than or equal to |length()| -- the vector's actual elements
    * are implicitly reserved.  This value is always less than or equal to
    * |capacity()|.  It may be explicitly increased using the |reserve()| method.
    */
   size_t reserved() const
   {
-    MOZ_ASSERT(mLength <= mReserved);
-    MOZ_ASSERT(mReserved <= mCapacity);
-    return mReserved;
+    MOZ_ASSERT(mLength <= mTail.mReserved);
+    MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
+    return mTail.mReserved;
   }
 #endif
 
   /* Append operations guaranteed to succeed due to pre-reserved space. */
   template<typename U> void internalAppend(U&& aU);
   template<typename U, size_t O, class BP>
   void internalAppendAll(const Vector<U, O, BP>& aU);
   void internalAppendN(const T& aT, size_t aN);
@@ -452,17 +496,17 @@ public:
   AllocPolicy& allocPolicy() { return *this; }
 
   enum { InlineLength = MinInlineCapacity };
 
   size_t length() const { return mLength; }
 
   bool empty() const { return mLength == 0; }
 
-  size_t capacity() const { return mCapacity; }
+  size_t capacity() const { return mTail.mCapacity; }
 
   T* begin()
   {
     MOZ_ASSERT(!mEntered);
     return mBegin;
   }
 
   const T* begin() const
@@ -779,50 +823,49 @@ public:
 private:
   Vector(const Vector&) = delete;
   void operator=(const Vector&) = delete;
 };
 
 /* This does the re-entrancy check plus several other sanity checks. */
 #define MOZ_REENTRANCY_GUARD_ET_AL \
   ReentrancyGuard g(*this); \
-  MOZ_ASSERT_IF(usingInlineStorage(), mCapacity == kInlineCapacity); \
-  MOZ_ASSERT(reserved() <= mCapacity); \
+  MOZ_ASSERT_IF(usingInlineStorage(), mTail.mCapacity == kInlineCapacity); \
+  MOZ_ASSERT(reserved() <= mTail.mCapacity); \
   MOZ_ASSERT(mLength <= reserved()); \
-  MOZ_ASSERT(mLength <= mCapacity)
+  MOZ_ASSERT(mLength <= mTail.mCapacity)
 
 /* Vector Implementation */
 
 template<typename T, size_t N, class AP>
 MOZ_ALWAYS_INLINE
 Vector<T, N, AP>::Vector(AP aAP)
   : AP(aAP)
   , mLength(0)
-  , mCapacity(kInlineCapacity)
+  , mTail(kInlineCapacity, 0)
 #ifdef DEBUG
-  , mReserved(0)
   , mEntered(false)
 #endif
 {
   mBegin = inlineStorage();
 }
 
 /* Move constructor. */
 template<typename T, size_t N, class AllocPolicy>
 MOZ_ALWAYS_INLINE
 Vector<T, N, AllocPolicy>::Vector(Vector&& aRhs)
   : AllocPolicy(Move(aRhs))
 #ifdef DEBUG
   , mEntered(false)
 #endif
 {
   mLength = aRhs.mLength;
-  mCapacity = aRhs.mCapacity;
+  mTail.mCapacity = aRhs.mTail.mCapacity;
 #ifdef DEBUG
-  mReserved = aRhs.mReserved;
+  mTail.mReserved = aRhs.mTail.mReserved;
 #endif
 
   if (aRhs.usingInlineStorage()) {
     /* We can't move the buffer over in this case, so copy elements. */
     mBegin = inlineStorage();
     Impl::moveConstruct(mBegin, aRhs.beginNoCheck(), aRhs.endNoCheck());
     /*
      * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
@@ -830,20 +873,20 @@ Vector<T, N, AllocPolicy>::Vector(Vector
      */
   } else {
     /*
      * Take src's buffer, and turn src into an empty vector using
      * in-line storage.
      */
     mBegin = aRhs.mBegin;
     aRhs.mBegin = aRhs.inlineStorage();
-    aRhs.mCapacity = kInlineCapacity;
+    aRhs.mTail.mCapacity = kInlineCapacity;
     aRhs.mLength = 0;
 #ifdef DEBUG
-    aRhs.mReserved = 0;
+    aRhs.mTail.mReserved = 0;
 #endif
   }
 }
 
 /* Move assignment. */
 template<typename T, size_t N, class AP>
 MOZ_ALWAYS_INLINE Vector<T, N, AP>&
 Vector<T, N, AP>::operator=(Vector&& aRhs)
@@ -897,25 +940,25 @@ Vector<T, N, AP>::convertToHeapStorage(s
 
   /* Copy inline elements into heap buffer. */
   Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
   Impl::destroy(beginNoCheck(), endNoCheck());
 
   /* Switch in heap buffer. */
   mBegin = newBuf;
   /* mLength is unchanged. */
-  mCapacity = aNewCap;
+  mTail.mCapacity = aNewCap;
   return true;
 }
 
 template<typename T, size_t N, class AP>
 MOZ_NEVER_INLINE bool
 Vector<T, N, AP>::growStorageBy(size_t aIncr)
 {
-  MOZ_ASSERT(mLength + aIncr > mCapacity);
+  MOZ_ASSERT(mLength + aIncr > mTail.mCapacity);
 
   /*
   * When choosing a new capacity, its size should be as close to 2**N bytes
    * as possible.  2**N-sized requests are best because they are unlikely to
    * be rounded up by the allocator.  Asking for a 2**N number of elements
    * isn't as good, because if sizeof(T) is not a power-of-two that would
    * result in a non-2**N request size.
    */
@@ -997,19 +1040,19 @@ Vector<T, N, AP>::initCapacity(size_t aR
   if (aRequest == 0) {
     return true;
   }
   T* newbuf = this->template pod_malloc<T>(aRequest);
   if (MOZ_UNLIKELY(!newbuf)) {
     return false;
   }
   mBegin = newbuf;
-  mCapacity = aRequest;
+  mTail.mCapacity = aRequest;
 #ifdef DEBUG
-  mReserved = aRequest;
+  mTail.mReserved = aRequest;
 #endif
   return true;
 }
 
 template<typename T, size_t N, class AP>
 inline bool
 Vector<T, N, AP>::initLengthUninitialized(size_t aRequest)
 {
@@ -1024,42 +1067,42 @@ template<typename T, size_t N, class AP>
 inline bool
 Vector<T, N, AP>::maybeCheckSimulatedOOM(size_t aRequestedSize)
 {
   if (aRequestedSize <= N) {
     return true;
   }
 
 #ifdef DEBUG
-  if (aRequestedSize <= mReserved) {
+  if (aRequestedSize <= mTail.mReserved) {
     return true;
   }
 #endif
 
   return allocPolicy().checkSimulatedOOM();
 }
 
 template<typename T, size_t N, class AP>
 inline bool
 Vector<T, N, AP>::reserve(size_t aRequest)
 {
   MOZ_REENTRANCY_GUARD_ET_AL;
-  if (aRequest > mCapacity) {
+  if (aRequest > mTail.mCapacity) {
     if (MOZ_UNLIKELY(!growStorageBy(aRequest - mLength))) {
       return false;
     }
   } else if (!maybeCheckSimulatedOOM(aRequest)) {
     return false;
   }
 #ifdef DEBUG
-  if (aRequest > mReserved) {
-    mReserved = aRequest;
+  if (aRequest > mTail.mReserved) {
+    mTail.mReserved = aRequest;
   }
-  MOZ_ASSERT(mLength <= mReserved);
-  MOZ_ASSERT(mReserved <= mCapacity);
+  MOZ_ASSERT(mLength <= mTail.mReserved);
+  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
 #endif
   return true;
 }
 
 template<typename T, size_t N, class AP>
 inline void
 Vector<T, N, AP>::shrinkBy(size_t aIncr)
 {
@@ -1077,50 +1120,50 @@ Vector<T, N, AP>::shrinkTo(size_t aNewLe
   shrinkBy(mLength - aNewLength);
 }
 
 template<typename T, size_t N, class AP>
 MOZ_ALWAYS_INLINE bool
 Vector<T, N, AP>::growBy(size_t aIncr)
 {
   MOZ_REENTRANCY_GUARD_ET_AL;
-  if (aIncr > mCapacity - mLength) {
+  if (aIncr > mTail.mCapacity - mLength) {
     if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
       return false;
     }
   } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
     return false;
   }
-  MOZ_ASSERT(mLength + aIncr <= mCapacity);
+  MOZ_ASSERT(mLength + aIncr <= mTail.mCapacity);
   T* newend = endNoCheck() + aIncr;
   Impl::initialize(endNoCheck(), newend);
   mLength += aIncr;
 #ifdef DEBUG
-  if (mLength > mReserved) {
-    mReserved = mLength;
+  if (mLength > mTail.mReserved) {
+    mTail.mReserved = mLength;
   }
 #endif
   return true;
 }
 
 template<typename T, size_t N, class AP>
 MOZ_ALWAYS_INLINE bool
 Vector<T, N, AP>::growByUninitialized(size_t aIncr)
 {
   MOZ_REENTRANCY_GUARD_ET_AL;
-  if (aIncr > mCapacity - mLength) {
+  if (aIncr > mTail.mCapacity - mLength) {
     if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
       return false;
     }
   } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
     return false;
   }
 #ifdef DEBUG
-  if (mLength + aIncr > mReserved) {
-    mReserved = mLength + aIncr;
+  if (mLength + aIncr > mTail.mReserved) {
+    mTail.mReserved = mLength + aIncr;
   }
 #endif
   infallibleGrowByUninitialized(aIncr);
   return true;
 }
 
 template<typename T, size_t N, class AP>
 MOZ_ALWAYS_INLINE void
@@ -1169,84 +1212,84 @@ Vector<T, N, AP>::clearAndFree()
 {
   clear();
 
   if (usingInlineStorage()) {
     return;
   }
   this->free_(beginNoCheck());
   mBegin = inlineStorage();
-  mCapacity = kInlineCapacity;
+  mTail.mCapacity = kInlineCapacity;
 #ifdef DEBUG
-  mReserved = 0;
+  mTail.mReserved = 0;
 #endif
 }
 
 template<typename T, size_t N, class AP>
 inline void
 Vector<T, N, AP>::podResizeToFit()
 {
   // This function is only defined if IsPod is true and will fail to compile
   // otherwise.
   Impl::podResizeToFit(*this);
 }
 
 template<typename T, size_t N, class AP>
 inline bool
 Vector<T, N, AP>::canAppendWithoutRealloc(size_t aNeeded) const
 {
-  return mLength + aNeeded <= mCapacity;
+  return mLength + aNeeded <= mTail.mCapacity;
 }
 
 template<typename T, size_t N, class AP>
 template<typename U, size_t O, class BP>
 MOZ_ALWAYS_INLINE void
 Vector<T, N, AP>::internalAppendAll(const Vector<U, O, BP>& aOther)
 {
   internalAppend(aOther.begin(), aOther.length());
 }
 
 template<typename T, size_t N, class AP>
 template<typename U>
 MOZ_ALWAYS_INLINE void
 Vector<T, N, AP>::internalAppend(U&& aU)
 {
-  MOZ_ASSERT(mLength + 1 <= mReserved);
-  MOZ_ASSERT(mReserved <= mCapacity);
+  MOZ_ASSERT(mLength + 1 <= mTail.mReserved);
+  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
   Impl::new_(endNoCheck(), Forward<U>(aU));
   ++mLength;
 }
 
 template<typename T, size_t N, class AP>
 MOZ_ALWAYS_INLINE bool
 Vector<T, N, AP>::appendN(const T& aT, size_t aNeeded)
 {
   MOZ_REENTRANCY_GUARD_ET_AL;
-  if (mLength + aNeeded > mCapacity) {
+  if (mLength + aNeeded > mTail.mCapacity) {
     if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
       return false;
     }
   } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
     return false;
   }
 #ifdef DEBUG
-  if (mLength + aNeeded > mReserved) {
-    mReserved = mLength + aNeeded;
+  if (mLength + aNeeded > mTail.mReserved) {
+    mTail.mReserved = mLength + aNeeded;
   }
 #endif
   internalAppendN(aT, aNeeded);
   return true;
 }
 
 template<typename T, size_t N, class AP>
 MOZ_ALWAYS_INLINE void
 Vector<T, N, AP>::internalAppendN(const T& aT, size_t aNeeded)
 {
-  MOZ_ASSERT(mLength + aNeeded <= mReserved);
-  MOZ_ASSERT(mReserved <= mCapacity);
+  MOZ_ASSERT(mLength + aNeeded <= mTail.mReserved);
+  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
   Impl::copyConstructN(endNoCheck(), aNeeded, aT);
   mLength += aNeeded;
 }
 
 template<typename T, size_t N, class AP>
 template<typename U>
 inline T*
 Vector<T, N, AP>::insert(T* aP, U&& aVal)
@@ -1301,59 +1344,59 @@ Vector<T, N, AP>::erase(T* aBegin, T* aE
 
 template<typename T, size_t N, class AP>
 template<typename U>
 MOZ_ALWAYS_INLINE bool
 Vector<T, N, AP>::append(const U* aInsBegin, const U* aInsEnd)
 {
   MOZ_REENTRANCY_GUARD_ET_AL;
   size_t aNeeded = PointerRangeSize(aInsBegin, aInsEnd);
-  if (mLength + aNeeded > mCapacity) {
+  if (mLength + aNeeded > mTail.mCapacity) {
     if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
       return false;
     }
   } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
       return false;
   }
 #ifdef DEBUG
-  if (mLength + aNeeded > mReserved) {
-    mReserved = mLength + aNeeded;
+  if (mLength + aNeeded > mTail.mReserved) {
+    mTail.mReserved = mLength + aNeeded;
   }
 #endif
   internalAppend(aInsBegin, aNeeded);
   return true;
 }
 
 template<typename T, size_t N, class AP>
 template<typename U>
 MOZ_ALWAYS_INLINE void
 Vector<T, N, AP>::internalAppend(const U* aInsBegin, size_t aInsLength)
 {
-  MOZ_ASSERT(mLength + aInsLength <= mReserved);
-  MOZ_ASSERT(mReserved <= mCapacity);
+  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
+  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
   Impl::copyConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
   mLength += aInsLength;
 }
 
 template<typename T, size_t N, class AP>
 template<typename U>
 MOZ_ALWAYS_INLINE bool
 Vector<T, N, AP>::append(U&& aU)
 {
   MOZ_REENTRANCY_GUARD_ET_AL;
-  if (mLength == mCapacity) {
+  if (mLength == mTail.mCapacity) {
     if (MOZ_UNLIKELY(!growStorageBy(1))) {
       return false;
     }
   } else if (!maybeCheckSimulatedOOM(mLength + 1)) {
       return false;
   }
 #ifdef DEBUG
-  if (mLength + 1 > mReserved) {
-    mReserved = mLength + 1;
+  if (mLength + 1 > mTail.mReserved) {
+    mTail.mReserved = mLength + 1;
   }
 #endif
   internalAppend(Forward<U>(aU));
   return true;
 }
 
 template<typename T, size_t N, class AP>
 template<typename U, size_t O, class BP>
@@ -1398,19 +1441,19 @@ Vector<T, N, AP>::extractRawBuffer()
 
   if (usingInlineStorage()) {
     return nullptr;
   }
 
   T* ret = mBegin;
   mBegin = inlineStorage();
   mLength = 0;
-  mCapacity = kInlineCapacity;
+  mTail.mCapacity = kInlineCapacity;
 #ifdef DEBUG
-  mReserved = 0;
+  mTail.mReserved = 0;
 #endif
   return ret;
 }
 
 template<typename T, size_t N, class AP>
 inline T*
 Vector<T, N, AP>::extractOrCopyRawBuffer()
 {
@@ -1424,19 +1467,19 @@ Vector<T, N, AP>::extractOrCopyRawBuffer
   if (!copy) {
     return nullptr;
   }
 
   Impl::moveConstruct(copy, beginNoCheck(), endNoCheck());
   Impl::destroy(beginNoCheck(), endNoCheck());
   mBegin = inlineStorage();
   mLength = 0;
-  mCapacity = kInlineCapacity;
+  mTail.mCapacity = kInlineCapacity;
 #ifdef DEBUG
-  mReserved = 0;
+  mTail.mReserved = 0;
 #endif
   return copy;
 }
 
 template<typename T, size_t N, class AP>
 inline void
 Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength)
 {
@@ -1452,27 +1495,27 @@ Vector<T, N, AP>::replaceRawBuffer(T* aP
   if (aLength <= kInlineCapacity) {
     /*
      * We convert to inline storage if possible, even though aP might
      * otherwise be acceptable.  Maybe this behaviour should be
      * specifiable with an argument to this function.
      */
     mBegin = inlineStorage();
     mLength = aLength;
-    mCapacity = kInlineCapacity;
+    mTail.mCapacity = kInlineCapacity;
     Impl::moveConstruct(mBegin, aP, aP + aLength);
     Impl::destroy(aP, aP + aLength);
     this->free_(aP);
   } else {
     mBegin = aP;
     mLength = aLength;
-    mCapacity = aLength;
+    mTail.mCapacity = aLength;
   }
 #ifdef DEBUG
-  mReserved = aLength;
+  mTail.mReserved = aLength;
 #endif
 }
 
 template<typename T, size_t N, class AP>
 inline size_t
 Vector<T, N, AP>::sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
 {
   return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
@@ -1501,19 +1544,19 @@ Vector<T, N, AP>::swap(Vector& aOther)
     aOther.mBegin = aOther.inlineStorage();
   } else if (!usingInlineStorage() && !aOther.usingInlineStorage()) {
     Swap(mBegin, aOther.mBegin);
   } else {
     // This case is a no-op, since we'd set both to use their inline storage.
   }
 
   Swap(mLength, aOther.mLength);
-  Swap(mCapacity, aOther.mCapacity);
+  Swap(mTail.mCapacity, aOther.mTail.mCapacity);
 #ifdef DEBUG
-  Swap(mReserved, aOther.mReserved);
+  Swap(mTail.mReserved, aOther.mTail.mReserved);
 #endif
 }
 
 } // namespace mozilla
 
 #ifdef _MSC_VER
 #pragma warning(pop)
 #endif
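
One subtlety carried over from the old InlineStorage above: GCC rejects a
direct cast of mBytes to T* under -Werror=strict-aliasing, so the cast is
routed through a void*-returning data() accessor. A minimal sketch of that
idiom, with hypothetical names and a placement-new round trip to show the
intended use:

#include <new>
#include <stddef.h>

template<typename T, size_t N>
struct InlineBuffer
{
  alignas(T) unsigned char mBytes[N * sizeof(T)];

  // Casting mBytes straight to T* trips GCC's strict-aliasing warning;
  // hopping through void* does not, and matches how the storage is really
  // used: the bytes only contain a T after a placement-new.
  void* data() { return mBytes; }
  T* storage() { return static_cast<T*>(data()); }
};

int main()
{
  InlineBuffer<double, 4> buf;
  double* d = new (buf.data()) double(1.5);  // construct a T in the raw bytes
  return *d == 1.5 ? 0 : 1;  // double is trivially destructible; no cleanup
}
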
--- a/mfbt/tests/TestVector.cpp
+++ b/mfbt/tests/TestVector.cpp
@@ -341,16 +341,59 @@ mozilla::detail::VectorTesting::testExtr
   for (size_t i = 0; i < 10; i++) {
     MOZ_RELEASE_ASSERT(buf[i].j == i);
     MOZ_RELEASE_ASSERT(*buf[i].k == i * i);
   }
 
   free(buf);
 }
 
+// Declare but leave (permanently) incomplete.
+struct Incomplete;
+
+// We could even *construct* a Vector<Incomplete, 0> if we wanted.  But we can't
+// destruct it, so it's not worth the trouble.
+static_assert(sizeof(Vector<Incomplete, 0>) > 0,
+              "Vector of an incomplete type will compile");
+
+// Vector with no inline storage should occupy the absolute minimum space in
+// non-debug builds.  (Debug adds a laundry list of other constraints, none
+// directly relevant to shipping builds, that aren't worth precisely modeling.)
+#ifndef DEBUG
+
+template<typename T>
+struct NoInlineStorageLayout
+{
+  T* mBegin;
+  size_t mLength;
+  struct CRAndStorage {
+    size_t mCapacity;
+  } mTail;
+};
+
+// Only one of these should be necessary, but test a few of them for good
+// measure.
+static_assert(sizeof(Vector<int, 0>) == sizeof(NoInlineStorageLayout<int>),
+              "Vector of int without inline storage shouldn't occupy dead "
+              "space for that absence of storage");
+
+static_assert(sizeof(Vector<bool, 0>) == sizeof(NoInlineStorageLayout<bool>),
+              "Vector of bool without inline storage shouldn't occupy dead "
+              "space for that absence of storage");
+
+static_assert(sizeof(Vector<S, 0>) == sizeof(NoInlineStorageLayout<S>),
+              "Vector of S without inline storage shouldn't occupy dead "
+              "space for that absence of storage");
+
+static_assert(sizeof(Vector<Incomplete, 0>) == sizeof(NoInlineStorageLayout<Incomplete>),
+              "Vector of an incomplete class without inline storage shouldn't "
+              "occupy dead space for that absence of storage");
+
+#endif // DEBUG
+
 int
 main()
 {
   VectorTesting::testReserved();
   VectorTesting::testConstRange();
   VectorTesting::testEmplaceBack();
   VectorTesting::testReverse();
   VectorTesting::testExtractRawBuffer();
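
As background for the capacity asserts throughout: the comment in
growStorageBy above explains that new capacities are chosen so the request
is as close to 2**N bytes as possible, because power-of-two byte requests
are least likely to be rounded up by the allocator. A rough sketch of that
rounding -- my own illustration with a hypothetical helper name; the real
growStorageBy also checks for overflow and handles the inline-to-heap
transition:

#include <stddef.h>

// Hypothetical helper, not the mfbt code: round a requested element count
// up so the allocation is a power of two in *bytes*.  Simplified -- no
// overflow checking, unlike the real growth path.
template<typename T>
size_t goodCapacityFor(size_t aMinElems)
{
  size_t minBytes = aMinElems * sizeof(T);
  size_t goodBytes = 1;
  while (goodBytes < minBytes) {
    goodBytes *= 2;
  }
  // goodBytes >= aMinElems * sizeof(T), so the division can't come back
  // under the requested element count.
  return goodBytes / sizeof(T);
}

// e.g. goodCapacityFor<char>(100) == 128: the allocator sees a clean
// 128-byte request instead of an awkward 100-byte one.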