merge mozilla-inbound to mozilla-central a=merge
author Carsten "Tomcat" Book <cbook@mozilla.com>
Tue, 19 May 2015 11:58:45 +0200
changeset 244483 4fb7ff694bf5944b76c20b2240fb3abdc55f9a4e
parent 244431 9ed6520fcf47884bba6ed2e14f5865f49137dfbb (current diff)
parent 244482 e7cd988251979d27a012c80fc2747168bb1c87e1 (diff)
child 244523 ac277e615f8f9af7fb4d58e1779c2104691d567a
child 244629 031246dbe72452fb07a02e15e2cfc6179d5b70df
push id 28781
push user cbook@mozilla.com
push date Tue, 19 May 2015 10:01:15 +0000
treeherder mozilla-central@4fb7ff694bf5
reviewers merge
milestone 41.0a1
first release with: 41.0a1 nightly 4fb7ff694bf5 (build 20150519030202) on linux32, linux64, mac, win32, win64
merge mozilla-inbound to mozilla-central a=merge
js/src/jit/LiveRangeAllocator.cpp
js/src/jit/LiveRangeAllocator.h
--- a/caps/BasePrincipal.cpp
+++ b/caps/BasePrincipal.cpp
@@ -1,18 +1,79 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=2 sw=2 et tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/BasePrincipal.h"
 
+#include "nsIObjectInputStream.h"
+#include "nsIObjectOutputStream.h"
+#include "nsScriptSecurityManager.h"
+
 namespace mozilla {
 
+void
+BasePrincipal::OriginAttributes::Serialize(nsIObjectOutputStream* aStream) const
+{
+  aStream->Write32(mAppId);
+  aStream->WriteBoolean(mIsInBrowserElement);
+}
+
+nsresult
+BasePrincipal::OriginAttributes::Deserialize(nsIObjectInputStream* aStream)
+{
+  nsresult rv = aStream->Read32(&mAppId);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  rv = aStream->ReadBoolean(&mIsInBrowserElement);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  return NS_OK;
+}
+
+bool
+BasePrincipal::Subsumes(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration)
+{
+  MOZ_RELEASE_ASSERT(aOther, "The caller is performing a nonsensical security check!");
+  return SubsumesInternal(aOther, aConsideration);
+}
+
+NS_IMETHODIMP
+BasePrincipal::Equals(nsIPrincipal *aOther, bool *aResult)
+{
+
+  *aResult = Subsumes(aOther, DontConsiderDocumentDomain) &&
+             Cast(aOther)->Subsumes(this, DontConsiderDocumentDomain);
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+BasePrincipal::EqualsConsideringDomain(nsIPrincipal *aOther, bool *aResult)
+{
+  *aResult = Subsumes(aOther, ConsiderDocumentDomain) &&
+             Cast(aOther)->Subsumes(this, ConsiderDocumentDomain);
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+BasePrincipal::Subsumes(nsIPrincipal *aOther, bool *aResult)
+{
+  *aResult = Subsumes(aOther, DontConsiderDocumentDomain);
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+BasePrincipal::SubsumesConsideringDomain(nsIPrincipal *aOther, bool *aResult)
+{
+  *aResult = Subsumes(aOther, ConsiderDocumentDomain);
+  return NS_OK;
+}
+
 NS_IMETHODIMP
 BasePrincipal::GetCsp(nsIContentSecurityPolicy** aCsp)
 {
   NS_IF_ADDREF(*aCsp = mCSP);
   return NS_OK;
 }
 
 NS_IMETHODIMP
@@ -29,10 +90,58 @@ BasePrincipal::SetCsp(nsIContentSecurity
 
 NS_IMETHODIMP
 BasePrincipal::GetIsNullPrincipal(bool* aIsNullPrincipal)
 {
   *aIsNullPrincipal = false;
   return NS_OK;
 }
 
+NS_IMETHODIMP
+BasePrincipal::GetJarPrefix(nsACString& aJarPrefix)
+{
+  MOZ_ASSERT(AppId() != nsIScriptSecurityManager::UNKNOWN_APP_ID);
+
+  mozilla::GetJarPrefix(AppId(), IsInBrowserElement(), aJarPrefix);
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+BasePrincipal::GetAppStatus(uint16_t* aAppStatus)
+{
+  if (AppId() == nsIScriptSecurityManager::UNKNOWN_APP_ID) {
+    NS_WARNING("Asking for app status on a principal with an unknown app id");
+    *aAppStatus = nsIPrincipal::APP_STATUS_NOT_INSTALLED;
+    return NS_OK;
+  }
+
+  *aAppStatus = nsScriptSecurityManager::AppStatusForPrincipal(this);
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+BasePrincipal::GetAppId(uint32_t* aAppId)
+{
+  if (AppId() == nsIScriptSecurityManager::UNKNOWN_APP_ID) {
+    MOZ_ASSERT(false);
+    *aAppId = nsIScriptSecurityManager::NO_APP_ID;
+    return NS_OK;
+  }
+
+  *aAppId = AppId();
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+BasePrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
+{
+  *aIsInBrowserElement = IsInBrowserElement();
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+BasePrincipal::GetUnknownAppId(bool* aUnknownAppId)
+{
+  *aUnknownAppId = AppId() == nsIScriptSecurityManager::UNKNOWN_APP_ID;
+  return NS_OK;
+}
 
 } // namespace mozilla
--- a/caps/BasePrincipal.h
+++ b/caps/BasePrincipal.h
@@ -3,38 +3,88 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_BasePrincipal_h
 #define mozilla_BasePrincipal_h
 
 #include "nsIPrincipal.h"
+#include "nsIScriptSecurityManager.h"
 #include "nsJSPrincipals.h"
 
+class nsIObjectOutputStream;
+class nsIObjectInputStream;
+
 namespace mozilla {
 
 /*
  * Base class from which all nsIPrincipal implementations inherit. Use this for
  * default implementations and other commonalities between principal
  * implementations.
  *
  * We should merge nsJSPrincipals into this class at some point.
  */
 class BasePrincipal : public nsJSPrincipals
 {
 public:
   BasePrincipal() {}
+
+  enum DocumentDomainConsideration { DontConsiderDocumentDomain, ConsiderDocumentDomain};
+  bool Subsumes(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration);
+
+  NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) final;
+  NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) final;
+  NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) final;
+  NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) final;
   NS_IMETHOD GetCsp(nsIContentSecurityPolicy** aCsp) override;
   NS_IMETHOD SetCsp(nsIContentSecurityPolicy* aCsp) override;
   NS_IMETHOD GetIsNullPrincipal(bool* aIsNullPrincipal) override;
+  NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) final;
+  NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) final;
+  NS_IMETHOD GetAppId(uint32_t* aAppStatus) final;
+  NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) final;
+  NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) final;
 
   virtual bool IsOnCSSUnprefixingWhitelist() override { return false; }
 
+  static BasePrincipal* Cast(nsIPrincipal* aPrin) { return static_cast<BasePrincipal*>(aPrin); }
+
+  struct OriginAttributes {
+    // NB: If you add any members here, you need to update Serialize/Deserialize
+    // and bump the CIDs of all the principal implementations that invoke those
+    // methods.
+    uint32_t mAppId;
+    bool mIsInBrowserElement;
+
+    OriginAttributes() : mAppId(nsIScriptSecurityManager::NO_APP_ID), mIsInBrowserElement(false) {}
+    OriginAttributes(uint32_t aAppId, bool aIsInBrowserElement)
+      : mAppId(aAppId), mIsInBrowserElement(aIsInBrowserElement) {}
+    bool operator==(const OriginAttributes& aOther) const
+    {
+      return mAppId == aOther.mAppId &&
+             mIsInBrowserElement == aOther.mIsInBrowserElement;
+    }
+    bool operator!=(const OriginAttributes& aOther) const
+    {
+      return !(*this == aOther);
+    }
+
+    void Serialize(nsIObjectOutputStream* aStream) const;
+    nsresult Deserialize(nsIObjectInputStream* aStream);
+  };
+
+  const OriginAttributes& OriginAttributesRef() { return mOriginAttributes; }
+  uint32_t AppId() const { return mOriginAttributes.mAppId; }
+  bool IsInBrowserElement() const { return mOriginAttributes.mIsInBrowserElement; }
+
 protected:
   virtual ~BasePrincipal() {}
 
+  virtual bool SubsumesInternal(nsIPrincipal* aOther, DocumentDomainConsideration aConsider) = 0;
+
   nsCOMPtr<nsIContentSecurityPolicy> mCSP;
+  OriginAttributes mOriginAttributes;
 };
 
 } // namespace mozilla
 
 #endif /* mozilla_BasePrincipal_h */
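For illustration only, not part of the patch: a minimal sketch of the pattern the new BasePrincipal.h establishes. Concrete principal classes override the single SubsumesInternal() hook, while the final Equals()/EqualsConsideringDomain()/Subsumes()/SubsumesConsideringDomain() implementations above funnel through it. The class name ExamplePrincipal is invented for the sketch.

#include "mozilla/BasePrincipal.h"

namespace mozilla {

// Hypothetical subclass shown only to illustrate the hook; the remaining
// nsIPrincipal/nsISerializable methods are omitted.
class ExamplePrincipal : public BasePrincipal
{
protected:
  // Equals(), EqualsConsideringDomain(), Subsumes() and
  // SubsumesConsideringDomain() are final in BasePrincipal and all route
  // their security decision through this one hook.
  bool SubsumesInternal(nsIPrincipal* aOther,
                        DocumentDomainConsideration aConsideration) override
  {
    // Example policy, mirroring nsNullPrincipal: subsume only ourselves.
    return aOther == this;
  }
};

} // namespace mozilla

nsNullPrincipal and nsSystemPrincipal below implement exactly this shape inline; nsPrincipal and nsExpandedPrincipal supply richer overrides in nsPrincipal.cpp.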
--- a/caps/nsNullPrincipal.cpp
+++ b/caps/nsNullPrincipal.cpp
@@ -12,18 +12,16 @@
 
 #include "mozilla/ArrayUtils.h"
 
 #include "nsNullPrincipal.h"
 #include "nsNullPrincipalURI.h"
 #include "nsMemory.h"
 #include "nsNetUtil.h"
 #include "nsIClassInfoImpl.h"
-#include "nsIObjectInputStream.h"
-#include "nsIObjectOutputStream.h"
 #include "nsNetCID.h"
 #include "nsError.h"
 #include "nsIScriptSecurityManager.h"
 #include "nsPrincipal.h"
 #include "nsScriptSecurityManager.h"
 #include "pratom.h"
 
 using namespace mozilla;
@@ -36,37 +34,35 @@ NS_IMPL_QUERY_INTERFACE_CI(nsNullPrincip
 NS_IMPL_CI_INTERFACE_GETTER(nsNullPrincipal,
                             nsIPrincipal,
                             nsISerializable)
 
 /* static */ already_AddRefed<nsNullPrincipal>
 nsNullPrincipal::CreateWithInheritedAttributes(nsIPrincipal* aInheritFrom)
 {
   nsRefPtr<nsNullPrincipal> nullPrin = new nsNullPrincipal();
-  nsresult rv = nullPrin->Init(aInheritFrom->GetAppId(),
-                               aInheritFrom->GetIsInBrowserElement());
+  nsresult rv = nullPrin->Init(Cast(aInheritFrom)->OriginAttributesRef());
   return NS_SUCCEEDED(rv) ? nullPrin.forget() : nullptr;
 }
 
 /* static */ already_AddRefed<nsNullPrincipal>
-nsNullPrincipal::Create(uint32_t aAppId, bool aInMozBrowser)
+nsNullPrincipal::Create(const OriginAttributes& aOriginAttributes)
 {
   nsRefPtr<nsNullPrincipal> nullPrin = new nsNullPrincipal();
-  nsresult rv = nullPrin->Init(aAppId, aInMozBrowser);
+  nsresult rv = nullPrin->Init(aOriginAttributes);
   NS_ENSURE_SUCCESS(rv, nullptr);
 
   return nullPrin.forget();
 }
 
 nsresult
-nsNullPrincipal::Init(uint32_t aAppId, bool aInMozBrowser)
+nsNullPrincipal::Init(const OriginAttributes& aOriginAttributes)
 {
-  MOZ_ASSERT(aAppId != nsIScriptSecurityManager::UNKNOWN_APP_ID);
-  mAppId = aAppId;
-  mInMozBrowser = aInMozBrowser;
+  mOriginAttributes = aOriginAttributes;
+  MOZ_ASSERT(AppId() != nsIScriptSecurityManager::UNKNOWN_APP_ID);
 
   mURI = nsNullPrincipalURI::Create();
   NS_ENSURE_TRUE(mURI, NS_ERROR_NOT_AVAILABLE);
 
   return NS_OK;
 }
 
 void
@@ -75,31 +71,16 @@ nsNullPrincipal::GetScriptLocation(nsACS
   mURI->GetSpec(aStr);
 }
 
 /**
  * nsIPrincipal implementation
  */
 
 NS_IMETHODIMP
-nsNullPrincipal::Equals(nsIPrincipal *aOther, bool *aResult)
-{
-  // Just equal to ourselves.  Note that nsPrincipal::Equals will return false
-  // for us since we have a unique domain/origin/etc.
-  *aResult = (aOther == this);
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsNullPrincipal::EqualsConsideringDomain(nsIPrincipal *aOther, bool *aResult)
-{
-  return Equals(aOther, aResult);
-}
-
-NS_IMETHODIMP
 nsNullPrincipal::GetHashValue(uint32_t *aResult)
 {
   *aResult = (NS_PTR_TO_INT32(this) >> 2);
   return NS_OK;
 }
 
 NS_IMETHODIMP 
 nsNullPrincipal::GetURI(nsIURI** aURI)
@@ -123,32 +104,16 @@ nsNullPrincipal::SetDomain(nsIURI* aDoma
 
 NS_IMETHODIMP
 nsNullPrincipal::GetOrigin(nsACString& aOrigin)
 {
   return mURI->GetSpec(aOrigin);
 }
 
 NS_IMETHODIMP
-nsNullPrincipal::Subsumes(nsIPrincipal *aOther, bool *aResult)
-{
-  // We don't subsume anything except ourselves.  Note that nsPrincipal::Equals
-  // will return false for us, since we're not about:blank and not Equals to
-  // reasonable nsPrincipals.
-  *aResult = (aOther == this);
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsNullPrincipal::SubsumesConsideringDomain(nsIPrincipal *aOther, bool *aResult)
-{
-  return Subsumes(aOther, aResult);
-}
-
-NS_IMETHODIMP
 nsNullPrincipal::CheckMayLoad(nsIURI* aURI, bool aReport, bool aAllowIfInheritsPrincipal)
  {
   if (aAllowIfInheritsPrincipal) {
     if (nsPrincipal::IsPrincipalInherited(aURI)) {
       return NS_OK;
     }
   }
 
@@ -167,51 +132,16 @@ nsNullPrincipal::CheckMayLoad(nsIURI* aU
     nsScriptSecurityManager::ReportError(
       nullptr, NS_LITERAL_STRING("CheckSameOriginError"), mURI, aURI);
   }
 
   return NS_ERROR_DOM_BAD_URI;
 }
 
 NS_IMETHODIMP
-nsNullPrincipal::GetJarPrefix(nsACString& aJarPrefix)
-{
-  aJarPrefix.Truncate();
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsNullPrincipal::GetAppStatus(uint16_t* aAppStatus)
-{
-  *aAppStatus = nsScriptSecurityManager::AppStatusForPrincipal(this);
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsNullPrincipal::GetAppId(uint32_t* aAppId)
-{
-  *aAppId = mAppId;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsNullPrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
-{
-  *aIsInBrowserElement = mInMozBrowser;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsNullPrincipal::GetUnknownAppId(bool* aUnknownAppId)
-{
-  *aUnknownAppId = false;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
 nsNullPrincipal::GetIsNullPrincipal(bool* aIsNullPrincipal)
 {
   *aIsNullPrincipal = true;
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsNullPrincipal::GetBaseDomain(nsACString& aBaseDomain)
@@ -225,25 +155,18 @@ nsNullPrincipal::GetBaseDomain(nsACStrin
  */
 NS_IMETHODIMP
 nsNullPrincipal::Read(nsIObjectInputStream* aStream)
 {
   // Note - nsNullPrincipal use NS_GENERIC_FACTORY_CONSTRUCTOR_INIT, which means
   // that the Init() method has already been invoked by the time we deserialize.
   // This is in contrast to nsPrincipal, which uses NS_GENERIC_FACTORY_CONSTRUCTOR,
   // in which case ::Read needs to invoke Init().
-  nsresult rv = aStream->Read32(&mAppId);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  rv = aStream->ReadBoolean(&mInMozBrowser);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  return NS_OK;
+  return mOriginAttributes.Deserialize(aStream);
 }
 
 NS_IMETHODIMP
 nsNullPrincipal::Write(nsIObjectOutputStream* aStream)
 {
-  aStream->Write32(mAppId);
-  aStream->WriteBoolean(mInMozBrowser);
+  OriginAttributesRef().Serialize(aStream);
   return NS_OK;
 }
 
--- a/caps/nsNullPrincipal.h
+++ b/caps/nsNullPrincipal.h
@@ -35,49 +35,41 @@ public:
   // This should only be used by deserialization, and the factory constructor.
   // Other consumers should use the Create and CreateWithInheritedAttributes
   // methods.
   nsNullPrincipal() {}
 
   NS_DECL_NSISERIALIZABLE
 
   NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
-  NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) override;
-  NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) override;
   NS_IMETHOD GetHashValue(uint32_t* aHashValue) override;
   NS_IMETHOD GetURI(nsIURI** aURI) override;
   NS_IMETHOD GetDomain(nsIURI** aDomain) override;
   NS_IMETHOD SetDomain(nsIURI* aDomain) override;
   NS_IMETHOD GetOrigin(nsACString& aOrigin) override;
-  NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) override;
-  NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) override;
   NS_IMETHOD CheckMayLoad(nsIURI* uri, bool report, bool allowIfInheritsPrincipal) override;
-  NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) override;
-  NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) override;
-  NS_IMETHOD GetAppId(uint32_t* aAppStatus) override;
-  NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) override;
-  NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) override;
   NS_IMETHOD GetIsNullPrincipal(bool* aIsNullPrincipal) override;
   NS_IMETHOD GetBaseDomain(nsACString& aBaseDomain) override;
 
   // Returns null on failure.
   static already_AddRefed<nsNullPrincipal> CreateWithInheritedAttributes(nsIPrincipal *aInheritFrom);
 
   // Returns null on failure.
   static already_AddRefed<nsNullPrincipal>
-    Create(uint32_t aAppId = nsIScriptSecurityManager::NO_APP_ID,
-           bool aInMozBrowser = false);
+    Create(const OriginAttributes& aOriginAttributes = OriginAttributes());
 
-  nsresult Init(uint32_t aAppId = nsIScriptSecurityManager::NO_APP_ID,
-                bool aInMozBrowser = false);
+  nsresult Init(const OriginAttributes& aOriginAttributes = OriginAttributes());
 
   virtual void GetScriptLocation(nsACString &aStr) override;
 
  protected:
   virtual ~nsNullPrincipal() {}
 
+  bool SubsumesInternal(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration) override
+  {
+    return aOther == this;
+  }
+
   nsCOMPtr<nsIURI> mURI;
   nsCOMPtr<nsIContentSecurityPolicy> mCSP;
-  uint32_t mAppId;
-  bool mInMozBrowser;
 };
 
 #endif // nsNullPrincipal_h__
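A minimal usage sketch, not from the patch: the helper name CreateAppNullPrincipal is invented, and it simply shows how the old (appId, inMozBrowser) pair now travels through an OriginAttributes value into the new Create() overload declared above.

#include "nsNullPrincipal.h"
#include "mozilla/BasePrincipal.h"

// Hypothetical helper: bundle the legacy arguments into OriginAttributes
// and hand them to the new Create() overload.
static already_AddRefed<nsNullPrincipal>
CreateAppNullPrincipal(uint32_t aAppId, bool aInBrowser)
{
  mozilla::BasePrincipal::OriginAttributes attrs(aAppId, aInBrowser);
  return nsNullPrincipal::Create(attrs);  // returns null on failure
}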
--- a/caps/nsPrincipal.cpp
+++ b/caps/nsPrincipal.cpp
@@ -10,18 +10,16 @@
 #include "nscore.h"
 #include "nsScriptSecurityManager.h"
 #include "nsString.h"
 #include "nsReadableUtils.h"
 #include "pratom.h"
 #include "nsIURI.h"
 #include "nsJSPrincipals.h"
 #include "nsIEffectiveTLDService.h"
-#include "nsIObjectInputStream.h"
-#include "nsIObjectOutputStream.h"
 #include "nsIClassInfoImpl.h"
 #include "nsIProtocolHandler.h"
 #include "nsError.h"
 #include "nsIContentSecurityPolicy.h"
 #include "nsNetCID.h"
 #include "jswrapper.h"
 
 #include "mozilla/dom/ScriptSettings.h"
@@ -64,41 +62,35 @@ nsPrincipal::InitializeStatics()
     "layout.css.unprefixing-service.include-test-domains");
 
   Preferences::AddBoolVarCache(&gCodeBasePrincipalSupport,
                                "signed.applets.codebase_principal_support",
                                false);
 }
 
 nsPrincipal::nsPrincipal()
-  : mAppId(nsIScriptSecurityManager::UNKNOWN_APP_ID)
-  , mInMozBrowser(false)
-  , mCodebaseImmutable(false)
+  : mCodebaseImmutable(false)
   , mDomainImmutable(false)
   , mInitialized(false)
 { }
 
 nsPrincipal::~nsPrincipal()
 { }
 
 nsresult
-nsPrincipal::Init(nsIURI *aCodebase,
-                  uint32_t aAppId,
-                  bool aInMozBrowser)
+nsPrincipal::Init(nsIURI *aCodebase, const OriginAttributes& aOriginAttributes)
 {
   NS_ENSURE_STATE(!mInitialized);
   NS_ENSURE_ARG(aCodebase);
 
   mInitialized = true;
 
   mCodebase = NS_TryToMakeImmutable(aCodebase);
   mCodebaseImmutable = URIIsImmutable(mCodebase);
-
-  mAppId = aAppId;
-  mInMozBrowser = aInMozBrowser;
+  mOriginAttributes = aOriginAttributes;
 
   return NS_OK;
 }
 
 void
 nsPrincipal::GetScriptLocation(nsACString &aStr)
 {
   mCodebase->GetSpec(aStr);
@@ -158,103 +150,55 @@ nsPrincipal::GetOriginForURI(nsIURI* aUR
 }
 
 NS_IMETHODIMP
 nsPrincipal::GetOrigin(nsACString& aOrigin)
 {
   return GetOriginForURI(mCodebase, aOrigin);
 }
 
-NS_IMETHODIMP
-nsPrincipal::EqualsConsideringDomain(nsIPrincipal *aOther, bool *aResult)
+bool
+nsPrincipal::SubsumesInternal(nsIPrincipal* aOther,
+                              BasePrincipal::DocumentDomainConsideration aConsideration)
 {
-  *aResult = false;
+  MOZ_ASSERT(aOther);
 
-  if (!aOther) {
-    NS_WARNING("Need a principal to compare this to!");
-    return NS_OK;
+  // For nsPrincipal, Subsumes is equivalent to Equals.
+  if (aOther == this) {
+    return true;
   }
 
-  if (aOther == this) {
-    *aResult = true;
-    return NS_OK;
-  }
-
-  if (!nsScriptSecurityManager::AppAttributesEqual(this, aOther)) {
-      return NS_OK;
+  if (OriginAttributesRef() != Cast(aOther)->OriginAttributesRef()) {
+    return false;
   }
 
   // If either the subject or the object has changed its principal by
   // explicitly setting document.domain then the other must also have
   // done so in order to be considered the same origin. This prevents
   // DNS spoofing based on document.domain (154930)
-
-  nsCOMPtr<nsIURI> thisURI;
-  this->GetDomain(getter_AddRefs(thisURI));
-  bool thisSetDomain = !!thisURI;
-  if (!thisURI) {
-      this->GetURI(getter_AddRefs(thisURI));
-  }
+  nsresult rv;
+  if (aConsideration == ConsiderDocumentDomain) {
+    // Get .domain on each principal.
+    nsCOMPtr<nsIURI> thisDomain, otherDomain;
+    GetDomain(getter_AddRefs(thisDomain));
+    aOther->GetDomain(getter_AddRefs(otherDomain));
 
-  nsCOMPtr<nsIURI> otherURI;
-  aOther->GetDomain(getter_AddRefs(otherURI));
-  bool otherSetDomain = !!otherURI;
-  if (!otherURI) {
-      aOther->GetURI(getter_AddRefs(otherURI));
-  }
-
-  *aResult = thisSetDomain == otherSetDomain &&
-             nsScriptSecurityManager::SecurityCompareURIs(thisURI, otherURI);
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsPrincipal::Equals(nsIPrincipal *aOther, bool *aResult)
-{
-  *aResult = false;
-
-  if (!aOther) {
-    NS_WARNING("Need a principal to compare this to!");
-    return NS_OK;
+    // If either has .domain set, we have equality if and only if the domains match.
+    // Otherwise, we fall through to the non-document-domain-considering case.
+    if (thisDomain || otherDomain) {
+      return nsScriptSecurityManager::SecurityCompareURIs(thisDomain, otherDomain);
+    }
   }
 
-  if (aOther == this) {
-    *aResult = true;
-    return NS_OK;
-  }
-
-  if (!nsScriptSecurityManager::AppAttributesEqual(this, aOther)) {
-    return NS_OK;
-  }
-
-  nsCOMPtr<nsIURI> otherURI;
-  nsresult rv = aOther->GetURI(getter_AddRefs(otherURI));
-  if (NS_FAILED(rv)) {
-    return rv;
-  }
-
-  NS_ASSERTION(mCodebase,
-               "shouldn't be calling this on principals from preferences");
+    nsCOMPtr<nsIURI> otherURI;
+    rv = aOther->GetURI(getter_AddRefs(otherURI));
+    NS_ENSURE_SUCCESS(rv, false);
 
   // Compare codebases.
-  *aResult = nsScriptSecurityManager::SecurityCompareURIs(mCodebase,
-                                                          otherURI);
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsPrincipal::Subsumes(nsIPrincipal *aOther, bool *aResult)
-{
-  return Equals(aOther, aResult);
-}
-
-NS_IMETHODIMP
-nsPrincipal::SubsumesConsideringDomain(nsIPrincipal *aOther, bool *aResult)
-{
-  return EqualsConsideringDomain(aOther, aResult);
+  return nsScriptSecurityManager::SecurityCompareURIs(mCodebase, otherURI);
 }
 
 NS_IMETHODIMP
 nsPrincipal::GetURI(nsIURI** aURI)
 {
   if (mCodebaseImmutable) {
     NS_ADDREF(*aURI = mCodebase);
     return NS_OK;
@@ -357,59 +301,16 @@ nsPrincipal::SetDomain(nsIURI* aDomain)
   success = js::RecomputeWrappers(cx, js::CompartmentsWithPrincipals(principals),
                                   js::ContentCompartmentsOnly());
   NS_ENSURE_TRUE(success, NS_ERROR_FAILURE);
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsPrincipal::GetJarPrefix(nsACString& aJarPrefix)
-{
-  MOZ_ASSERT(mAppId != nsIScriptSecurityManager::UNKNOWN_APP_ID);
-
-  mozilla::GetJarPrefix(mAppId, mInMozBrowser, aJarPrefix);
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsPrincipal::GetAppStatus(uint16_t* aAppStatus)
-{
-  *aAppStatus = GetAppStatus();
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsPrincipal::GetAppId(uint32_t* aAppId)
-{
-  if (mAppId == nsIScriptSecurityManager::UNKNOWN_APP_ID) {
-    MOZ_ASSERT(false);
-    *aAppId = nsIScriptSecurityManager::NO_APP_ID;
-    return NS_OK;
-  }
-
-  *aAppId = mAppId;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsPrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
-{
-  *aIsInBrowserElement = mInMozBrowser;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsPrincipal::GetUnknownAppId(bool* aUnknownAppId)
-{
-  *aUnknownAppId = mAppId == nsIScriptSecurityManager::UNKNOWN_APP_ID;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
 nsPrincipal::GetBaseDomain(nsACString& aBaseDomain)
 {
   // For a file URI, we return the file path.
   if (NS_URIIsLocalFile(mCodebase)) {
     nsCOMPtr<nsIURL> url = do_QueryInterface(mCodebase);
 
     if (url) {
       return url->GetFilePath(aBaseDomain);
@@ -454,31 +355,27 @@ nsPrincipal::Read(nsIObjectInputStream* 
   nsCOMPtr<nsIURI> domain;
   rv = NS_ReadOptionalObject(aStream, true, getter_AddRefs(supports));
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   domain = do_QueryInterface(supports);
 
-  uint32_t appId;
-  rv = aStream->Read32(&appId);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  bool inMozBrowser;
-  rv = aStream->ReadBoolean(&inMozBrowser);
+  OriginAttributes attrs;
+  rv = attrs.Deserialize(aStream);
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = NS_ReadOptionalObject(aStream, true, getter_AddRefs(supports));
   NS_ENSURE_SUCCESS(rv, rv);
 
   // This may be null.
   nsCOMPtr<nsIContentSecurityPolicy> csp = do_QueryInterface(supports, &rv);
 
-  rv = Init(codebase, appId, inMozBrowser);
+  rv = Init(codebase, attrs);
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = SetCsp(csp);
   NS_ENSURE_SUCCESS(rv, rv);
 
   // need to link in the CSP context here (link in the URI of the protected
   // resource).
   if (csp) {
@@ -502,42 +399,31 @@ nsPrincipal::Write(nsIObjectOutputStream
   }
 
   rv = NS_WriteOptionalCompoundObject(aStream, mDomain, NS_GET_IID(nsIURI),
                                       true);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
-  aStream->Write32(mAppId);
-  aStream->WriteBoolean(mInMozBrowser);
+  OriginAttributesRef().Serialize(aStream);
 
   rv = NS_WriteOptionalCompoundObject(aStream, mCSP,
                                       NS_GET_IID(nsIContentSecurityPolicy),
                                       true);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   // mCodebaseImmutable and mDomainImmutable will be recomputed based
   // on the deserialized URIs in Read().
 
   return NS_OK;
 }
 
-uint16_t
-nsPrincipal::GetAppStatus()
-{
-  if (mAppId == nsIScriptSecurityManager::UNKNOWN_APP_ID) {
-    NS_WARNING("Asking for app status on a principal with an unknown app id");
-    return nsIPrincipal::APP_STATUS_NOT_INSTALLED;
-  }
-  return nsScriptSecurityManager::AppStatusForPrincipal(this);
-}
-
 // Helper-function to indicate whether the CSS Unprefixing Service
 // whitelist should include dummy domains that are only intended for
 // use in testing. (Controlled by a pref.)
 static inline bool
 IsWhitelistingTestDomains()
 {
   return gIsWhitelistingTestDomains;
 }
@@ -787,105 +673,43 @@ nsExpandedPrincipal::GetOrigin(nsACStrin
     NS_ENSURE_SUCCESS(rv, rv);
     aOrigin.Append(subOrigin);
   }
 
   aOrigin.Append("]]");
   return NS_OK;
 }
 
-typedef nsresult (NS_STDCALL nsIPrincipal::*nsIPrincipalMemFn)(nsIPrincipal* aOther,
-                                                               bool* aResult);
-#define CALL_MEMBER_FUNCTION(THIS,MEM_FN)  ((THIS)->*(MEM_FN))
-
-// nsExpandedPrincipal::Equals and nsExpandedPrincipal::EqualsConsideringDomain
-// shares the same logic. The difference only that Equals requires 'this'
-// and 'aOther' to Subsume each other while EqualsConsideringDomain requires
-// bidirectional SubsumesConsideringDomain.
-static nsresult
-Equals(nsExpandedPrincipal* aThis, nsIPrincipalMemFn aFn, nsIPrincipal* aOther,
-       bool* aResult)
+bool
+nsExpandedPrincipal::SubsumesInternal(nsIPrincipal* aOther,
+                                      BasePrincipal::DocumentDomainConsideration aConsideration)
 {
-  // If (and only if) 'aThis' and 'aOther' both Subsume/SubsumesConsideringDomain
-  // each other, then they are Equal.
-  *aResult = false;
-  // Calling the corresponding subsume function on this (aFn).
-  nsresult rv = CALL_MEMBER_FUNCTION(aThis, aFn)(aOther, aResult);
-  NS_ENSURE_SUCCESS(rv, rv);
-  if (!*aResult)
-    return NS_OK;
-
-  // Calling the corresponding subsume function on aOther (aFn).
-  rv = CALL_MEMBER_FUNCTION(aOther, aFn)(aThis, aResult);
-  NS_ENSURE_SUCCESS(rv, rv);
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsExpandedPrincipal::Equals(nsIPrincipal* aOther, bool* aResult)
-{
-  return ::Equals(this, &nsIPrincipal::Subsumes, aOther, aResult);
-}
-
-NS_IMETHODIMP
-nsExpandedPrincipal::EqualsConsideringDomain(nsIPrincipal* aOther, bool* aResult)
-{
-  return ::Equals(this, &nsIPrincipal::SubsumesConsideringDomain, aOther, aResult);
-}
-
-// nsExpandedPrincipal::Subsumes and nsExpandedPrincipal::SubsumesConsideringDomain
-// shares the same logic. The difference only that Subsumes calls are replaced
-//with SubsumesConsideringDomain calls in the second case.
-static nsresult
-Subsumes(nsExpandedPrincipal* aThis, nsIPrincipalMemFn aFn, nsIPrincipal* aOther,
-         bool* aResult)
-{
-  nsresult rv;
+  // If aOther is an ExpandedPrincipal too, we break it down into its component
+  // nsIPrincipals, and check subsumes on each one.
   nsCOMPtr<nsIExpandedPrincipal> expanded = do_QueryInterface(aOther);
   if (expanded) {
-    // If aOther is an ExpandedPrincipal too, check if all of its
-    // principals are subsumed.
     nsTArray< nsCOMPtr<nsIPrincipal> >* otherList;
     expanded->GetWhiteList(&otherList);
     for (uint32_t i = 0; i < otherList->Length(); ++i){
-      rv = CALL_MEMBER_FUNCTION(aThis, aFn)((*otherList)[i], aResult);
-      NS_ENSURE_SUCCESS(rv, rv);
-      if (!*aResult) {
-        // If we don't subsume at least one principal of aOther, return false.
-        return NS_OK;
+      if (!SubsumesInternal((*otherList)[i], aConsideration)) {
+        return false;
       }
     }
-  } else {
-    // For a regular aOther, one of our principals must subsume it.
-    nsTArray< nsCOMPtr<nsIPrincipal> >* list;
-    aThis->GetWhiteList(&list);
-    for (uint32_t i = 0; i < list->Length(); ++i){
-      rv = CALL_MEMBER_FUNCTION((*list)[i], aFn)(aOther, aResult);
-      NS_ENSURE_SUCCESS(rv, rv);
-      if (*aResult) {
-        // If one of our principal subsumes it, return true.
-        return NS_OK;
-      }
+    return true;
+  }
+
+  // We're dealing with a regular principal. One of our principals must subsume
+  // it.
+  for (uint32_t i = 0; i < mPrincipals.Length(); ++i) {
+    if (Cast(mPrincipals[i])->Subsumes(aOther, aConsideration)) {
+      return true;
     }
   }
-  return NS_OK;
-}
 
-#undef CALL_MEMBER_FUNCTION
-
-NS_IMETHODIMP
-nsExpandedPrincipal::Subsumes(nsIPrincipal* aOther, bool* aResult)
-{
-  return ::Subsumes(this, &nsIPrincipal::Subsumes, aOther, aResult);
-}
-
-NS_IMETHODIMP
-nsExpandedPrincipal::SubsumesConsideringDomain(nsIPrincipal* aOther, bool* aResult)
-{
-  return ::Subsumes(this, &nsIPrincipal::SubsumesConsideringDomain, aOther, aResult);
+  return false;
 }
 
 NS_IMETHODIMP
 nsExpandedPrincipal::CheckMayLoad(nsIURI* uri, bool aReport, bool aAllowIfInheritsPrincipal)
 {
   nsresult rv;
   for (uint32_t i = 0; i < mPrincipals.Length(); ++i){
     rv = mPrincipals[i]->CheckMayLoad(uri, aReport, aAllowIfInheritsPrincipal);
@@ -912,51 +736,16 @@ nsExpandedPrincipal::GetURI(nsIURI** aUR
 NS_IMETHODIMP
 nsExpandedPrincipal::GetWhiteList(nsTArray<nsCOMPtr<nsIPrincipal> >** aWhiteList)
 {
   *aWhiteList = &mPrincipals;
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsExpandedPrincipal::GetJarPrefix(nsACString& aJarPrefix)
-{
-  aJarPrefix.Truncate();
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsExpandedPrincipal::GetAppStatus(uint16_t* aAppStatus)
-{
-  *aAppStatus = nsIPrincipal::APP_STATUS_NOT_INSTALLED;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsExpandedPrincipal::GetAppId(uint32_t* aAppId)
-{
-  *aAppId = nsIScriptSecurityManager::NO_APP_ID;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsExpandedPrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
-{
-  *aIsInBrowserElement = false;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsExpandedPrincipal::GetUnknownAppId(bool* aUnknownAppId)
-{
-  *aUnknownAppId = false;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
 nsExpandedPrincipal::GetBaseDomain(nsACString& aBaseDomain)
 {
   return NS_ERROR_NOT_AVAILABLE;
 }
 
 bool
 nsExpandedPrincipal::IsOnCSSUnprefixingWhitelist()
 {
--- a/caps/nsPrincipal.h
+++ b/caps/nsPrincipal.h
@@ -17,40 +17,29 @@
 #include "nsScriptSecurityManager.h"
 #include "mozilla/BasePrincipal.h"
 
 class nsPrincipal final : public mozilla::BasePrincipal
 {
 public:
   NS_DECL_NSISERIALIZABLE
   NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
-  NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) override;
-  NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) override;
   NS_IMETHOD GetHashValue(uint32_t* aHashValue) override;
   NS_IMETHOD GetURI(nsIURI** aURI) override;
   NS_IMETHOD GetDomain(nsIURI** aDomain) override;
   NS_IMETHOD SetDomain(nsIURI* aDomain) override;
   NS_IMETHOD GetOrigin(nsACString& aOrigin) override;
-  NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) override;
-  NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) override;
   NS_IMETHOD CheckMayLoad(nsIURI* uri, bool report, bool allowIfInheritsPrincipal) override;
-  NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) override;
-  NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) override;
-  NS_IMETHOD GetAppId(uint32_t* aAppStatus) override;
-  NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) override;
-  NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) override;
   NS_IMETHOD GetBaseDomain(nsACString& aBaseDomain) override;
   virtual bool IsOnCSSUnprefixingWhitelist() override;
 
   nsPrincipal();
 
   // Init() must be called before the principal is in a usable state.
-  nsresult Init(nsIURI* aCodebase,
-                uint32_t aAppId,
-                bool aInMozBrowser);
+  nsresult Init(nsIURI* aCodebase, const OriginAttributes& aOriginAttributes);
 
   virtual void GetScriptLocation(nsACString& aStr) override;
   void SetURI(nsIURI* aURI);
 
   static bool IsPrincipalInherited(nsIURI* aURI) {
     // return true if the loadee URI has
     // the URI_INHERITS_SECURITY_CONTEXT flag set.
     bool doesInheritSecurityContext;
@@ -74,66 +63,53 @@ public:
 
   /**
    * Called at startup to setup static data, e.g. about:config pref-observers.
    */
   static void InitializeStatics();
 
   nsCOMPtr<nsIURI> mDomain;
   nsCOMPtr<nsIURI> mCodebase;
-  uint32_t mAppId;
-  bool mInMozBrowser;
   // If mCodebaseImmutable is true, mCodebase is non-null and immutable
   bool mCodebaseImmutable;
   bool mDomainImmutable;
   bool mInitialized;
   mozilla::Maybe<bool> mIsOnCSSUnprefixingWhitelist; // Lazily-computed
 
 protected:
   virtual ~nsPrincipal();
 
-  /**
-   * Returns the app status of the principal based on mAppId and mInMozBrowser.
-   */
-  uint16_t GetAppStatus();
+  bool SubsumesInternal(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration) override;
 };
 
 class nsExpandedPrincipal : public nsIExpandedPrincipal, public mozilla::BasePrincipal
 {
 public:
   explicit nsExpandedPrincipal(nsTArray< nsCOMPtr<nsIPrincipal> > &aWhiteList);
 
-protected:
-  virtual ~nsExpandedPrincipal();
-
-public:
   NS_DECL_NSIEXPANDEDPRINCIPAL
   NS_DECL_NSISERIALIZABLE
   NS_IMETHODIMP_(MozExternalRefCountType) AddRef() override { return nsJSPrincipals::AddRef(); };
   NS_IMETHODIMP_(MozExternalRefCountType) Release() override { return nsJSPrincipals::Release(); };
   NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
-  NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) override;
-  NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) override;
   NS_IMETHOD GetHashValue(uint32_t* aHashValue) override;
   NS_IMETHOD GetURI(nsIURI** aURI) override;
   NS_IMETHOD GetDomain(nsIURI** aDomain) override;
   NS_IMETHOD SetDomain(nsIURI* aDomain) override;
   NS_IMETHOD GetOrigin(nsACString& aOrigin) override;
-  NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) override;
-  NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) override;
   NS_IMETHOD CheckMayLoad(nsIURI* uri, bool report, bool allowIfInheritsPrincipal) override;
-  NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) override;
-  NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) override;
-  NS_IMETHOD GetAppId(uint32_t* aAppStatus) override;
-  NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) override;
-  NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) override;
   NS_IMETHOD GetBaseDomain(nsACString& aBaseDomain) override;
   virtual bool IsOnCSSUnprefixingWhitelist() override;
   virtual void GetScriptLocation(nsACString &aStr) override;
 
+protected:
+  virtual ~nsExpandedPrincipal();
+
+  bool SubsumesInternal(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration) override;
+
 private:
   nsTArray< nsCOMPtr<nsIPrincipal> > mPrincipals;
 };
 
 #define NS_PRINCIPAL_CONTRACTID "@mozilla.org/principal;1"
 #define NS_PRINCIPAL_CID \
   { 0x09b7e598, 0x490d, 0x423f, \
     { 0xa8, 0xa6, 0x2e, 0x6c, 0x4e, 0xc8, 0x77, 0x50 }}
--- a/caps/nsScriptSecurityManager.cpp
+++ b/caps/nsScriptSecurityManager.cpp
@@ -482,36 +482,16 @@ nsScriptSecurityManager::HashPrincipalBy
 {
     nsCOMPtr<nsIURI> uri;
     aPrincipal->GetDomain(getter_AddRefs(uri));
     if (!uri)
         aPrincipal->GetURI(getter_AddRefs(uri));
     return SecurityHashURI(uri);
 }
 
-/* static */ bool
-nsScriptSecurityManager::AppAttributesEqual(nsIPrincipal* aFirst,
-                                            nsIPrincipal* aSecond)
-{
-    MOZ_ASSERT(aFirst && aSecond, "Don't pass null pointers!");
-
-    uint32_t firstAppId = nsIScriptSecurityManager::UNKNOWN_APP_ID;
-    if (!aFirst->GetUnknownAppId()) {
-        firstAppId = aFirst->GetAppId();
-    }
-
-    uint32_t secondAppId = nsIScriptSecurityManager::UNKNOWN_APP_ID;
-    if (!aSecond->GetUnknownAppId()) {
-        secondAppId = aSecond->GetAppId();
-    }
-
-    return ((firstAppId == secondAppId) &&
-            (aFirst->GetIsInBrowserElement() == aSecond->GetIsInBrowserElement()));
-}
-
 NS_IMETHODIMP
 nsScriptSecurityManager::CheckLoadURIFromScript(JSContext *cx, nsIURI *aURI)
 {
     // Get principal of currently executing script.
     MOZ_ASSERT(cx == nsContentUtils::GetCurrentJSContext());
     nsIPrincipal* principal = nsContentUtils::SubjectPrincipal();
     nsresult rv = CheckLoadURIWithPrincipal(principal, aURI,
                                             nsIScriptSecurityManager::STANDARD);
@@ -1017,19 +997,19 @@ nsScriptSecurityManager::CreateCodebaseP
             NS_ENSURE_TRUE(principal, NS_ERROR_FAILURE);
         }
 
         principal.forget(result);
 
         return NS_OK;
     }
 
+    BasePrincipal::OriginAttributes attrs(aAppId, aInMozBrowser);
     nsRefPtr<nsPrincipal> codebase = new nsPrincipal();
-
-    nsresult rv = codebase->Init(aURI, aAppId, aInMozBrowser);
+    nsresult rv = codebase->Init(aURI, attrs);
     if (NS_FAILED(rv))
         return rv;
 
     NS_ADDREF(*result = codebase);
 
     return NS_OK;
 }
 
--- a/caps/nsScriptSecurityManager.h
+++ b/caps/nsScriptSecurityManager.h
@@ -75,32 +75,16 @@ public:
     HashPrincipalByOrigin(nsIPrincipal* aPrincipal);
 
     static bool
     GetStrictFileOriginPolicy()
     {
         return sStrictFileOriginPolicy;
     }
 
-    /**
-     * Returns true if the two principals share the same app attributes.
-     *
-     * App attributes are appId and the inBrowserElement flag.
-     * Two principals have the same app attributes if those information are
-     * equals.
-     * This method helps keeping principals from different apps isolated from
-     * each other. Also, it helps making sure mozbrowser (web views) and their
-     * parent are isolated from each other. All those entities do not share the
-     * same data (cookies, IndexedDB, localStorage, etc.) so we shouldn't allow
-     * violating that principle.
-     */
-    static bool
-    AppAttributesEqual(nsIPrincipal* aFirst,
-                       nsIPrincipal* aSecond);
-
     void DeactivateDomainPolicy();
 
 private:
 
     // GetScriptSecurityManager is the only call that can make one
     nsScriptSecurityManager();
     virtual ~nsScriptSecurityManager();
 
--- a/caps/nsSystemPrincipal.cpp
+++ b/caps/nsSystemPrincipal.cpp
@@ -37,43 +37,16 @@ nsSystemPrincipal::GetScriptLocation(nsA
     aStr.AssignLiteral(SYSTEM_PRINCIPAL_SPEC);
 }
 
 ///////////////////////////////////////
 // Methods implementing nsIPrincipal //
 ///////////////////////////////////////
 
 NS_IMETHODIMP
-nsSystemPrincipal::Equals(nsIPrincipal *other, bool *result)
-{
-    *result = (other == this);
-    return NS_OK;
-}
-
-NS_IMETHODIMP
-nsSystemPrincipal::EqualsConsideringDomain(nsIPrincipal *other, bool *result)
-{
-    return Equals(other, result);
-}
-
-NS_IMETHODIMP
-nsSystemPrincipal::Subsumes(nsIPrincipal *other, bool *result)
-{
-    *result = true;
-    return NS_OK;
-}
-
-NS_IMETHODIMP
-nsSystemPrincipal::SubsumesConsideringDomain(nsIPrincipal *other, bool *result)
-{
-    *result = true;
-    return NS_OK;
-}
-
-NS_IMETHODIMP
 nsSystemPrincipal::CheckMayLoad(nsIURI* uri, bool aReport, bool aAllowIfInheritsPrincipal)
 {
     return NS_OK;
 }
 
 NS_IMETHODIMP
 nsSystemPrincipal::GetHashValue(uint32_t *result)
 {
@@ -118,51 +91,16 @@ nsSystemPrincipal::GetDomain(nsIURI** aD
 
 NS_IMETHODIMP
 nsSystemPrincipal::SetDomain(nsIURI* aDomain)
 {
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsSystemPrincipal::GetJarPrefix(nsACString& aJarPrefix)
-{
-  aJarPrefix.Truncate();
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsSystemPrincipal::GetAppStatus(uint16_t* aAppStatus)
-{
-  *aAppStatus = nsIPrincipal::APP_STATUS_NOT_INSTALLED;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsSystemPrincipal::GetAppId(uint32_t* aAppId)
-{
-  *aAppId = nsIScriptSecurityManager::NO_APP_ID;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsSystemPrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
-{
-  *aIsInBrowserElement = false;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-nsSystemPrincipal::GetUnknownAppId(bool* aUnknownAppId)
-{
-  *aUnknownAppId = false;
-  return NS_OK;
-}
-
-NS_IMETHODIMP
 nsSystemPrincipal::GetBaseDomain(nsACString& aBaseDomain)
 {
   // No base domain for chrome.
   return NS_OK;
 }
 
 //////////////////////////////////////////
 // Methods implementing nsISerializable //
--- a/caps/nsSystemPrincipal.h
+++ b/caps/nsSystemPrincipal.h
@@ -20,36 +20,32 @@
 #define NS_SYSTEMPRINCIPAL_CONTRACTID "@mozilla.org/systemprincipal;1"
 
 
 class nsSystemPrincipal final : public mozilla::BasePrincipal
 {
 public:
   NS_DECL_NSISERIALIZABLE
   NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
-  NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) override;
-  NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) override;
   NS_IMETHOD GetHashValue(uint32_t* aHashValue) override;
   NS_IMETHOD GetURI(nsIURI** aURI) override;
   NS_IMETHOD GetDomain(nsIURI** aDomain) override;
   NS_IMETHOD SetDomain(nsIURI* aDomain) override;
   NS_IMETHOD GetOrigin(nsACString& aOrigin) override;
-  NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) override;
-  NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) override;
   NS_IMETHOD CheckMayLoad(nsIURI* uri, bool report, bool allowIfInheritsPrincipal) override;
   NS_IMETHOD GetCsp(nsIContentSecurityPolicy** aCsp) override;
   NS_IMETHOD SetCsp(nsIContentSecurityPolicy* aCsp) override;
-  NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) override;
-  NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) override;
-  NS_IMETHOD GetAppId(uint32_t* aAppStatus) override;
-  NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) override;
-  NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) override;
   NS_IMETHOD GetBaseDomain(nsACString& aBaseDomain) override;
 
   nsSystemPrincipal() {}
 
   virtual void GetScriptLocation(nsACString &aStr) override;
 
 protected:
   virtual ~nsSystemPrincipal(void) {}
+
+  bool SubsumesInternal(nsIPrincipal *aOther, DocumentDomainConsideration aConsideration) override
+  {
+    return true;
+  }
 };
 
 #endif // nsSystemPrincipal_h__
--- a/dom/base/nsContentList.cpp
+++ b/dom/base/nsContentList.cpp
@@ -148,17 +148,17 @@ NS_IMPL_RELEASE_INHERITED(nsSimpleConten
 
 JSObject*
 nsSimpleContentList::WrapObject(JSContext *cx, JS::Handle<JSObject*> aGivenProto)
 {
   return NodeListBinding::Wrap(cx, this, aGivenProto);
 }
 
 // Hashtable for storing nsContentLists
-static PLDHashTable gContentListHashTable;
+static PLDHashTable2* gContentListHashTable;
 
 #define RECENTLY_USED_CONTENT_LIST_CACHE_SIZE 31
 static nsContentList*
   sRecentlyUsedContentLists[RECENTLY_USED_CONTENT_LIST_CACHE_SIZE] = {};
 
 static MOZ_ALWAYS_INLINE uint32_t
 RecentlyUsedCacheIndex(const nsContentListKey& aKey)
 {
@@ -210,29 +210,27 @@ NS_GetContentList(nsINode* aRootNode,
   {
     ContentListHashtableHashKey,
     ContentListHashtableMatchEntry,
     PL_DHashMoveEntryStub,
     PL_DHashClearEntryStub
   };
 
   // Initialize the hashtable if needed.
-  if (!gContentListHashTable.IsInitialized()) {
-    PL_DHashTableInit(&gContentListHashTable, &hash_table_ops,
-                      sizeof(ContentListHashEntry));
+  if (!gContentListHashTable) {
+    gContentListHashTable =
+      new PLDHashTable2(&hash_table_ops, sizeof(ContentListHashEntry));
   }
 
   ContentListHashEntry *entry = nullptr;
   // First we look in our hashtable.  Then we create a content list if needed
-  if (gContentListHashTable.IsInitialized()) {
-    entry = static_cast<ContentListHashEntry *>
-      (PL_DHashTableAdd(&gContentListHashTable, &hashKey, fallible));
-    if (entry)
-      list = entry->mContentList;
-  }
+  entry = static_cast<ContentListHashEntry *>
+    (PL_DHashTableAdd(gContentListHashTable, &hashKey, fallible));
+  if (entry)
+    list = entry->mContentList;
 
   if (!list) {
     // We need to create a ContentList and add it to our new entry, if
     // we have an entry
     nsCOMPtr<nsIAtom> xmlAtom = do_GetAtom(aTagname);
     nsCOMPtr<nsIAtom> htmlAtom;
     if (aMatchNameSpaceId == kNameSpaceID_Unknown) {
       nsAutoString lowercaseName;
@@ -267,17 +265,17 @@ nsCacheableFuncStringNodeList::WrapObjec
 
 JSObject*
 nsCacheableFuncStringHTMLCollection::WrapObject(JSContext *cx, JS::Handle<JSObject*> aGivenProto)
 {
   return HTMLCollectionBinding::Wrap(cx, this, aGivenProto);
 }
 
 // Hashtable for storing nsCacheableFuncStringContentList
-static PLDHashTable gFuncStringContentListHashTable;
+static PLDHashTable2* gFuncStringContentListHashTable;
 
 struct FuncStringContentListHashEntry : public PLDHashEntryHdr
 {
   nsCacheableFuncStringContentList* mContentList;
 };
 
 static PLDHashNumber
 FuncStringContentListHashtableHashKey(PLDHashTable *table, const void *key)
@@ -316,28 +314,28 @@ GetFuncStringContentList(nsINode* aRootN
   {
     FuncStringContentListHashtableHashKey,
     FuncStringContentListHashtableMatchEntry,
     PL_DHashMoveEntryStub,
     PL_DHashClearEntryStub
   };
 
   // Initialize the hashtable if needed.
-  if (!gFuncStringContentListHashTable.IsInitialized()) {
-    PL_DHashTableInit(&gFuncStringContentListHashTable, &hash_table_ops,
-                      sizeof(FuncStringContentListHashEntry));
+  if (!gFuncStringContentListHashTable) {
+    gFuncStringContentListHashTable =
+      new PLDHashTable2(&hash_table_ops, sizeof(FuncStringContentListHashEntry));
   }
 
   FuncStringContentListHashEntry *entry = nullptr;
   // First we look in our hashtable.  Then we create a content list if needed
-  if (gFuncStringContentListHashTable.IsInitialized()) {
+  if (gFuncStringContentListHashTable) {
     nsFuncStringCacheKey hashKey(aRootNode, aFunc, aString);
 
     entry = static_cast<FuncStringContentListHashEntry *>
-      (PL_DHashTableAdd(&gFuncStringContentListHashTable, &hashKey, fallible));
+      (PL_DHashTableAdd(gFuncStringContentListHashTable, &hashKey, fallible));
     if (entry) {
       list = entry->mContentList;
 #ifdef DEBUG
       MOZ_ASSERT_IF(list, list->mType == ListType::sType);
 #endif
     }
   }
 
@@ -965,23 +963,24 @@ nsContentList::RemoveFromHashtable()
   
   nsDependentAtomString str(mXMLMatchAtom);
   nsContentListKey key(mRootNode, mMatchNameSpaceId, str);
   uint32_t recentlyUsedCacheIndex = RecentlyUsedCacheIndex(key);
   if (sRecentlyUsedContentLists[recentlyUsedCacheIndex] == this) {
     sRecentlyUsedContentLists[recentlyUsedCacheIndex] = nullptr;
   }
 
-  if (!gContentListHashTable.IsInitialized())
+  if (!gContentListHashTable)
     return;
 
-  PL_DHashTableRemove(&gContentListHashTable, &key);
+  PL_DHashTableRemove(gContentListHashTable, &key);
 
-  if (gContentListHashTable.EntryCount() == 0) {
-    PL_DHashTableFinish(&gContentListHashTable);
+  if (gContentListHashTable->EntryCount() == 0) {
+    delete gContentListHashTable;
+    gContentListHashTable = nullptr;
   }
 }
 
 void
 nsContentList::BringSelfUpToDate(bool aDoFlush)
 {
   if (mRootNode && aDoFlush && mFlushesNeeded) {
     // XXX sXBL/XBL2 issue
@@ -1003,25 +1002,26 @@ nsContentList::BringSelfUpToDate(bool aD
 nsCacheableFuncStringContentList::~nsCacheableFuncStringContentList()
 {
   RemoveFromFuncStringHashtable();
 }
 
 void
 nsCacheableFuncStringContentList::RemoveFromFuncStringHashtable()
 {
-  if (!gFuncStringContentListHashTable.IsInitialized()) {
+  if (!gFuncStringContentListHashTable) {
     return;
   }
 
   nsFuncStringCacheKey key(mRootNode, mFunc, mString);
-  PL_DHashTableRemove(&gFuncStringContentListHashTable, &key);
+  PL_DHashTableRemove(gFuncStringContentListHashTable, &key);
 
-  if (gFuncStringContentListHashTable.EntryCount() == 0) {
-    PL_DHashTableFinish(&gFuncStringContentListHashTable);
+  if (gFuncStringContentListHashTable->EntryCount() == 0) {
+    delete gFuncStringContentListHashTable;
+    gFuncStringContentListHashTable = nullptr;
   }
 }
 
 #ifdef DEBUG_CONTENT_LIST
 void
 nsContentList::AssertInSync()
 {
   if (mState == LIST_DIRTY) {
--- a/dom/base/nsContentUtils.cpp
+++ b/dom/base/nsContentUtils.cpp
@@ -331,35 +331,35 @@ static const nsAttrValue::EnumTable kAut
   { 0 }
 };
 
 namespace {
 
 static NS_DEFINE_CID(kParserServiceCID, NS_PARSERSERVICE_CID);
 static NS_DEFINE_CID(kCParserCID, NS_PARSER_CID);
 
-static PLDHashTable sEventListenerManagersHash;
+static PLDHashTable2* sEventListenerManagersHash;
 
 class DOMEventListenerManagersHashReporter final : public nsIMemoryReporter
 {
   MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
 
   ~DOMEventListenerManagersHashReporter() {}
 
 public:
   NS_DECL_ISUPPORTS
 
   NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                             nsISupports* aData, bool aAnonymize) override
   {
     // We don't measure the |EventListenerManager| objects pointed to by the
     // entries because those references are non-owning.
-    int64_t amount = sEventListenerManagersHash.IsInitialized()
+    int64_t amount = sEventListenerManagersHash
                    ? PL_DHashTableSizeOfExcludingThis(
-                       &sEventListenerManagersHash, nullptr, MallocSizeOf)
+                       sEventListenerManagersHash, nullptr, MallocSizeOf)
                    : 0;
 
     return MOZ_COLLECT_REPORT(
       "explicit/dom/event-listener-managers-hash", KIND_HEAP, UNITS_BYTES,
       amount,
       "Memory used by the event listener manager's hash table.");
   }
 };
@@ -482,28 +482,28 @@ nsContentUtils::Init()
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = CallGetService(NS_WBRK_CONTRACTID, &sWordBreaker);
   NS_ENSURE_SUCCESS(rv, rv);
 
   if (!InitializeEventTable())
     return NS_ERROR_FAILURE;
 
-  if (!sEventListenerManagersHash.IsInitialized()) {
+  if (!sEventListenerManagersHash) {
     static const PLDHashTableOps hash_table_ops =
     {
       PL_DHashVoidPtrKeyStub,
       PL_DHashMatchEntryStub,
       PL_DHashMoveEntryStub,
       EventListenerManagerHashClearEntry,
       EventListenerManagerHashInitEntry
     };
 
-    PL_DHashTableInit(&sEventListenerManagersHash, &hash_table_ops,
-                      sizeof(EventListenerManagerMapEntry));
+    sEventListenerManagersHash =
+      new PLDHashTable2(&hash_table_ops, sizeof(EventListenerManagerMapEntry));
 
     RegisterStrongMemoryReporter(new DOMEventListenerManagersHashReporter());
   }
 
   sBlockedScriptRunners = new nsTArray< nsCOMPtr<nsIRunnable> >;
 
   Preferences::AddBoolVarCache(&sAllowXULXBL_for_file,
                                "dom.allow_XUL_XBL_for_file");
@@ -1798,31 +1798,32 @@ nsContentUtils::Shutdown()
 
   delete sAtomEventTable;
   sAtomEventTable = nullptr;
   delete sStringEventTable;
   sStringEventTable = nullptr;
   delete sUserDefinedEvents;
   sUserDefinedEvents = nullptr;
 
-  if (sEventListenerManagersHash.IsInitialized()) {
-    NS_ASSERTION(sEventListenerManagersHash.EntryCount() == 0,
+  if (sEventListenerManagersHash) {
+    NS_ASSERTION(sEventListenerManagersHash->EntryCount() == 0,
                  "Event listener manager hash not empty at shutdown!");
 
     // See comment above.
 
     // However, we have to handle this table differently.  If it still
     // has entries, we want to leak it too, so that we can keep it alive
     // in case any elements are destroyed.  Because if they are, we need
     // their event listener managers to be destroyed too, or otherwise
     // it could leave dangling references in DOMClassInfo's preserved
     // wrapper table.
 
-    if (sEventListenerManagersHash.EntryCount() == 0) {
-      PL_DHashTableFinish(&sEventListenerManagersHash);
+    if (sEventListenerManagersHash->EntryCount() == 0) {
+      delete sEventListenerManagersHash;
+      sEventListenerManagersHash = nullptr;
     }
   }
 
   NS_ASSERTION(!sBlockedScriptRunners ||
                sBlockedScriptRunners->Length() == 0,
                "How'd this happen?");
   delete sBlockedScriptRunners;
   sBlockedScriptRunners = nullptr;
@@ -3977,54 +3978,54 @@ ListenerEnumerator(PLDHashTable* aTable,
     }
   }
   return PL_DHASH_NEXT;
 }
 
 void
 nsContentUtils::UnmarkGrayJSListenersInCCGenerationDocuments(uint32_t aGeneration)
 {
-  if (sEventListenerManagersHash.IsInitialized()) {
-    PL_DHashTableEnumerate(&sEventListenerManagersHash, ListenerEnumerator,
+  if (sEventListenerManagersHash) {
+    PL_DHashTableEnumerate(sEventListenerManagersHash, ListenerEnumerator,
                            &aGeneration);
   }
 }
 
 /* static */
 void
 nsContentUtils::TraverseListenerManager(nsINode *aNode,
                                         nsCycleCollectionTraversalCallback &cb)
 {
-  if (!sEventListenerManagersHash.IsInitialized()) {
+  if (!sEventListenerManagersHash) {
     // We're already shut down, just return.
     return;
   }
 
   EventListenerManagerMapEntry *entry =
     static_cast<EventListenerManagerMapEntry *>
-               (PL_DHashTableSearch(&sEventListenerManagersHash, aNode));
+               (PL_DHashTableSearch(sEventListenerManagersHash, aNode));
   if (entry) {
     CycleCollectionNoteChild(cb, entry->mListenerManager.get(),
                              "[via hash] mListenerManager");
   }
 }
 
 EventListenerManager*
 nsContentUtils::GetListenerManagerForNode(nsINode *aNode)
 {
-  if (!sEventListenerManagersHash.IsInitialized()) {
+  if (!sEventListenerManagersHash) {
     // We're already shut down, don't bother creating an event listener
     // manager.
 
     return nullptr;
   }
 
   EventListenerManagerMapEntry *entry =
     static_cast<EventListenerManagerMapEntry *>
-      (PL_DHashTableAdd(&sEventListenerManagersHash, aNode, fallible));
+      (PL_DHashTableAdd(sEventListenerManagersHash, aNode, fallible));
 
   if (!entry) {
     return nullptr;
   }
 
   if (!entry->mListenerManager) {
     entry->mListenerManager = new EventListenerManager(aNode);
 
@@ -4036,47 +4037,47 @@ nsContentUtils::GetListenerManagerForNod
 
 EventListenerManager*
 nsContentUtils::GetExistingListenerManagerForNode(const nsINode *aNode)
 {
   if (!aNode->HasFlag(NODE_HAS_LISTENERMANAGER)) {
     return nullptr;
   }
 
-  if (!sEventListenerManagersHash.IsInitialized()) {
+  if (!sEventListenerManagersHash) {
     // We're already shut down, don't bother creating an event listener
     // manager.
 
     return nullptr;
   }
 
   EventListenerManagerMapEntry *entry =
     static_cast<EventListenerManagerMapEntry *>
-               (PL_DHashTableSearch(&sEventListenerManagersHash, aNode));
+               (PL_DHashTableSearch(sEventListenerManagersHash, aNode));
   if (entry) {
     return entry->mListenerManager;
   }
 
   return nullptr;
 }
 
 /* static */
 void
 nsContentUtils::RemoveListenerManager(nsINode *aNode)
 {
-  if (sEventListenerManagersHash.IsInitialized()) {
+  if (sEventListenerManagersHash) {
     EventListenerManagerMapEntry *entry =
       static_cast<EventListenerManagerMapEntry *>
-                 (PL_DHashTableSearch(&sEventListenerManagersHash, aNode));
+                 (PL_DHashTableSearch(sEventListenerManagersHash, aNode));
     if (entry) {
       nsRefPtr<EventListenerManager> listenerManager;
       listenerManager.swap(entry->mListenerManager);
       // Remove the entry and *then* do operations that could cause further
       // modification of sEventListenerManagersHash.  See bug 334177.
-      PL_DHashTableRawRemove(&sEventListenerManagersHash, entry);
+      PL_DHashTableRawRemove(sEventListenerManagersHash, entry);
       if (listenerManager) {
         listenerManager->Disconnect();
       }
     }
   }
 }
 
 /* static */
@@ -7744,9 +7745,9 @@ nsContentUtils::FirePageShowEvent(nsIDoc
     }
   }
 
   nsCOMPtr<nsIDocument> doc = aItem->GetDocument();
   NS_ASSERTION(doc, "What happened here?");
   if (doc->IsShowing() == aFireIfShowing) {
     doc->OnPageShow(true, aChromeEventHandler);
   }
-}
\ No newline at end of file
+}
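A condensed sketch of the hashtable pattern that nsContentList.cpp and nsContentUtils.cpp switch to above: the static PLDHashTable becomes a heap-allocated PLDHashTable2 pointer that is created on first use and deleted once it drains. The names sTable, MyEntry, AddKey and RemoveKey are invented for the example; the PLDHashTable2 calls mirror the hunks above.

#include "pldhash.h"
#include "mozilla/fallible.h"

struct MyEntry : public PLDHashEntryHdr
{
  void* mValue;
};

// Heap-allocated table, null until the first consumer shows up.
static PLDHashTable2* sTable;

static MyEntry*
AddKey(const void* aKey)
{
  static const PLDHashTableOps ops = {
    PL_DHashVoidPtrKeyStub,
    PL_DHashMatchEntryStub,
    PL_DHashMoveEntryStub,
    PL_DHashClearEntryStub
  };
  if (!sTable) {
    sTable = new PLDHashTable2(&ops, sizeof(MyEntry));
  }
  // Fallible add: returns null on OOM instead of aborting.
  return static_cast<MyEntry*>(PL_DHashTableAdd(sTable, aKey, mozilla::fallible));
}

static void
RemoveKey(const void* aKey)
{
  if (!sTable) {
    return;
  }
  PL_DHashTableRemove(sTable, aKey);
  if (sTable->EntryCount() == 0) {
    // Tear the table down once it drains, as the converted call sites do.
    delete sTable;
    sTable = nullptr;
  }
}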
--- a/dom/base/nsDOMMutationObserver.cpp
+++ b/dom/base/nsDOMMutationObserver.cpp
@@ -755,17 +755,17 @@ nsDOMMutationObserver::HandleMutation()
   if (!mPendingMutationCount || !outer ||
       outer->GetCurrentInnerWindow() != mOwner) {
     ClearPendingRecords();
     return;
   }
 
   mozilla::dom::Sequence<mozilla::dom::OwningNonNull<nsDOMMutationRecord> >
     mutations;
-  if (mutations.SetCapacity(mPendingMutationCount)) {
+  if (mutations.SetCapacity(mPendingMutationCount, mozilla::fallible)) {
     // We can't use TakeRecords easily here, because it deals with a
     // different type of array, and we want to optimize out any extra copying.
     nsRefPtr<nsDOMMutationRecord> current;
     current.swap(mFirstPendingMutation);
     for (uint32_t i = 0; i < mPendingMutationCount; ++i) {
       nsRefPtr<nsDOMMutationRecord> next;
       current->mNext.swap(next);
       *mutations.AppendElement() = current;
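This hunk is the first of many in this merge that pass mozilla::fallible to SetCapacity/SetLength so allocation failure is reported to the caller instead of aborting. A minimal sketch of the pattern, assuming a plain FallibleTArray; the function and values are illustrative, not from the patch:

// Sketch only; assumes nsTArray.h and mozilla/fallible.h are available.
bool
CopyValues(const uint32_t* aData, uint32_t aCount, FallibleTArray<uint32_t>& aOut)
{
  // Reserve space fallibly; a false return means the allocation failed.
  if (!aOut.SetCapacity(aCount, mozilla::fallible)) {
    return false;
  }
  for (uint32_t i = 0; i < aCount; ++i) {
    // Capacity was reserved above, so the append cannot fail here.
    *aOut.AppendElement() = aData[i];
  }
  return true;
}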
--- a/dom/base/nsJSTimeoutHandler.cpp
+++ b/dom/base/nsJSTimeoutHandler.cpp
@@ -358,17 +358,17 @@ nsJSScriptTimeoutHandler::Init(nsGlobalW
     // Create our arg array.  argc is the number of arguments passed
     // to setTimeout or setInterval; the first two are our callback
     // and the delay, so only arguments after that need to go in our
     // array.
     // std::max(argc - 2, 0) wouldn't work right because argc is unsigned.
     uint32_t argCount = std::max(argc, 2u) - 2;
 
     FallibleTArray<JS::Heap<JS::Value> > args;
-    if (!args.SetCapacity(argCount)) {
+    if (!args.SetCapacity(argCount, fallible)) {
       // No need to drop here, since we already have a non-null mFunction
       return NS_ERROR_OUT_OF_MEMORY;
     }
     for (uint32_t idx = 0; idx < argCount; ++idx) {
       *args.AppendElement() = argv[idx + 2];
     }
     args.SwapElements(mArgs);
   } else {
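The argCount comment above is about unsigned wraparound: with an unsigned argc, argc - 2 wraps for argc < 2, so the code clamps first. A self-contained illustration (the helper name is made up):

#include <algorithm>
#include <stdint.h>

uint32_t
ExtraArgCount(uint32_t argc)
{
  // argc - 2 would wrap to a huge value when argc < 2, so clamp argc up to 2
  // first: ExtraArgCount(1) == 0, ExtraArgCount(5) == 3.
  return std::max(argc, 2u) - 2;
}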
--- a/dom/bindings/Codegen.py
+++ b/dom/bindings/Codegen.py
@@ -5597,17 +5597,17 @@ class CGArgumentConverter(CGThing):
         replacer["elemType"] = typeConversion.declType.define()
 
         # NOTE: Keep this in sync with sequence conversions as needed
         variadicConversion = string.Template(
             "${seqType} ${declName};\n" +
             rooterDecl +
             dedent("""
                 if (${argc} > ${index}) {
-                  if (!${declName}.SetCapacity(${argc} - ${index})) {
+                  if (!${declName}.SetCapacity(${argc} - ${index}, mozilla::fallible)) {
                     JS_ReportOutOfMemory(cx);
                     return false;
                   }
                   for (uint32_t variadicArg = ${index}; variadicArg < ${argc}; ++variadicArg) {
                     ${elemType}& slot = *${declName}.AppendElement();
                 """)
         ).substitute(replacer)
 
--- a/dom/camera/DOMCameraControl.cpp
+++ b/dom/camera/DOMCameraControl.cpp
@@ -1369,17 +1369,17 @@ void
 nsDOMCameraControl::OnFacesDetected(const nsTArray<ICameraControl::Face>& aFaces)
 {
   DOM_CAMERA_LOGI("DOM OnFacesDetected %zu face(s)\n", aFaces.Length());
   MOZ_ASSERT(NS_IsMainThread());
 
   Sequence<OwningNonNull<DOMCameraDetectedFace> > faces;
   uint32_t len = aFaces.Length();
 
-  if (faces.SetCapacity(len)) {
+  if (faces.SetCapacity(len, fallible)) {
     for (uint32_t i = 0; i < len; ++i) {
       *faces.AppendElement() =
         new DOMCameraDetectedFace(static_cast<DOMMediaStream*>(this), aFaces[i]);
     }
   }
 
   CameraFacesDetectedEventInit eventInit;
   eventInit.mFaces.SetValue(faces);
--- a/dom/canvas/CanvasRenderingContext2D.cpp
+++ b/dom/canvas/CanvasRenderingContext2D.cpp
@@ -14,18 +14,16 @@
 
 #include "nsContentUtils.h"
 
 #include "nsIDocument.h"
 #include "mozilla/dom/HTMLCanvasElement.h"
 #include "nsSVGEffects.h"
 #include "nsPresContext.h"
 #include "nsIPresShell.h"
-#include "nsWidgetsCID.h"
-#include "nsIAppShell.h"
 
 #include "nsIInterfaceRequestorUtils.h"
 #include "nsIFrame.h"
 #include "nsError.h"
 
 #include "nsCSSParser.h"
 #include "mozilla/css/StyleRule.h"
 #include "mozilla/css/Declaration.h"
@@ -111,17 +109,16 @@
 #include "SVGContentUtils.h"
 #include "SVGImageContext.h"
 #include "nsIScreenManager.h"
 #include "nsFilterInstance.h"
 #include "nsSVGLength2.h"
 #include "nsDeviceContext.h"
 #include "nsFontMetrics.h"
 #include "Units.h"
-#include "mozilla/Services.h"
 
 #undef free // apparently defined by some windows header, clashing with a free()
             // method in SkTypes.h
 #include "SkiaGLGlue.h"
 #ifdef USE_SKIA
 #include "SurfaceTypes.h"
 #include "GLBlitHelper.h"
 #endif
@@ -177,74 +174,16 @@ public:
       gCanvasAzureMemoryUsed,
       "Memory used by 2D canvases. Each canvas requires "
       "(width * height * 4) bytes.");
   }
 };
 
 NS_IMPL_ISUPPORTS(Canvas2dPixelsReporter, nsIMemoryReporter)
 
-class CanvasShutdownObserver : public nsIObserver
-{
-  virtual ~CanvasShutdownObserver() {}
-
-public:
-  NS_DECL_ISUPPORTS
-
-  explicit CanvasShutdownObserver(CanvasRenderingContext2D* aCanvas)
-    : mCanvas(aCanvas)
-  {
-    nsCOMPtr<nsIObserverService> observerService =
-      mozilla::services::GetObserverService();
-    observerService->AddObserver(this, NS_XPCOM_WILL_SHUTDOWN_OBSERVER_ID, false);
-  }
-
-  void Shutdown() {
-    nsCOMPtr<nsIObserverService> observerService =
-      mozilla::services::GetObserverService();
-    observerService->RemoveObserver(this, NS_XPCOM_WILL_SHUTDOWN_OBSERVER_ID);
-  }
-
-  NS_IMETHOD Observe(nsISupports* aSubject,
-                     const char* aTopic,
-                     const char16_t* aData) override
-  {
-    mCanvas->ShutdownTaskQueue();
-    return NS_OK;
-  }
-
-private:
-  CanvasRenderingContext2D* mCanvas;
-};
-
-NS_IMPL_ISUPPORTS(CanvasShutdownObserver, nsIObserver);
-
-
-static NS_DEFINE_CID(kAppShellCID, NS_APPSHELL_CID);
-
-void
-CanvasRenderingContext2D::RecordCommand()
-{
-  static uint32_t kBatchSize = 5;
-  if (++mPendingCommands > kBatchSize) {
-    mPendingCommands = 0;
-    FlushDelayedTarget();
-    return;
-  }
-
-  if (mScheduledFlush) {
-    return;
-  }
-
-  mScheduledFlush = true;
-  nsCOMPtr<nsIAppShell> appShell = do_GetService(kAppShellCID);
-  nsCOMPtr<nsIRunnable> r = NS_NewRunnableMethod(this, &CanvasRenderingContext2D::StableStateReached);
-  appShell->RunInStableState(r);
-}
-
 class CanvasRadialGradient : public CanvasGradient
 {
 public:
   CanvasRadialGradient(CanvasRenderingContext2D* aContext,
                        const Point &aBeginOrigin, Float aBeginRadius,
                        const Point &aEndOrigin, Float aEndRadius)
     : CanvasGradient(aContext, Type::RADIAL)
     , mCenter1(aBeginOrigin)
@@ -449,21 +388,16 @@ public:
       mFinalTarget, mCtx->CurrentState().filter,
       mgfx::Rect(mPostFilterBounds),
       snapshot, mSourceGraphicRect,
       fillPaint, mFillPaintRect,
       strokePaint, mStrokePaintRect,
       mCtx->CurrentState().filterAdditionalImages,
       mPostFilterBounds.TopLeft() - mOffset,
       DrawOptions(1.0f, mCompositionOp));
-
-    // DrawTargetCapture doesn't properly support filter nodes because they are
-    // mutable. Block until drawing is done to avoid races.
-    mCtx->FlushDelayedTarget();
-    mCtx->FinishDelayedRendering();
   }
 
   DrawTarget* DT()
   {
     return mTarget;
   }
 
 private:
@@ -878,19 +812,16 @@ public:
   static void PreTransactionCallback(void* aData)
   {
     CanvasRenderingContext2DUserData* self =
       static_cast<CanvasRenderingContext2DUserData*>(aData);
     CanvasRenderingContext2D* context = self->mContext;
     if (!context || !context->mTarget)
       return;
 
-    context->FlushDelayedTarget();
-    context->FinishDelayedRendering();
-
     // Since SkiaGL defaults to storing drawing commands until flush,
     // we have to flush before presenting.
     context->mTarget->Flush();
   }
 
   static void DidTransactionCallback(void* aData)
   {
     CanvasRenderingContext2DUserData* self =
@@ -1002,48 +933,35 @@ CanvasRenderingContext2D::CanvasRenderin
 #ifdef USE_SKIA_GPU
   , mVideoTexture(0)
 #endif
   // these are the default values from the Canvas spec
   , mWidth(0), mHeight(0)
   , mZero(false), mOpaque(false)
   , mResetLayer(true)
   , mIPC(false)
-  , mPendingCommands(0)
-  , mScheduledFlush(false)
   , mDrawObserver(nullptr)
   , mIsEntireFrameInvalid(false)
   , mPredictManyRedrawCalls(false), mPathTransformWillUpdate(false)
   , mInvalidateCount(0)
 {
   sNumLivingContexts++;
 
-#ifdef XP_MACOSX
-  // Restrict async rendering to OSX for now until the failures on other
-  // platforms get resolved.
-  mTaskQueue = new MediaTaskQueue(SharedThreadPool::Get(NS_LITERAL_CSTRING("Canvas Rendering"),
-                                                        4));
-  mShutdownObserver = new CanvasShutdownObserver(this);
-#endif
-
   // The default is to use OpenGL mode
   if (!gfxPlatform::GetPlatform()->UseAcceleratedSkiaCanvas()) {
     mRenderingMode = RenderingMode::SoftwareBackendMode;
   }
 
   if (gfxPlatform::GetPlatform()->HaveChoiceOfHWAndSWCanvas()) {
     mDrawObserver = new CanvasDrawObserver(this);
   }
 }
 
 CanvasRenderingContext2D::~CanvasRenderingContext2D()
 {
-  if (mTaskQueue) {
-    ShutdownTaskQueue();
-  }
   RemoveDrawObserver();
   RemovePostRefreshObserver();
   Reset();
   // Drop references from all CanvasRenderingContext2DUserData to this context
   for (uint32_t i = 0; i < mUserDatas.Length(); ++i) {
     mUserDatas[i]->Forget();
   }
   sNumLivingContexts--;
@@ -1056,29 +974,16 @@ CanvasRenderingContext2D::~CanvasRenderi
     gfxPlatform::GetPlatform()->GetSkiaGLGlue()->GetGLContext()->MakeCurrent();
     gfxPlatform::GetPlatform()->GetSkiaGLGlue()->GetGLContext()->fDeleteTextures(1, &mVideoTexture);
   }
 #endif
 
   RemoveDemotableContext(this);
 }
 
-void
-CanvasRenderingContext2D::ShutdownTaskQueue()
-{
-  mShutdownObserver->Shutdown();
-  mShutdownObserver = nullptr;
-  FlushDelayedTarget();
-  FinishDelayedRendering();
-  mTaskQueue->BeginShutdown();
-  mTaskQueue = nullptr;
-  mDelayedTarget = nullptr;
-}
-
-
 JSObject*
 CanvasRenderingContext2D::WrapObject(JSContext *cx, JS::Handle<JSObject*> aGivenProto)
 {
   return CanvasRenderingContext2DBinding::Wrap(cx, this, aGivenProto);
 }
 
 bool
 CanvasRenderingContext2D::ParseColor(const nsAString& aString,
@@ -1124,20 +1029,17 @@ CanvasRenderingContext2D::Reset()
   }
 
   // only do this for non-docshell created contexts,
   // since those are the ones that we created a surface for
   if (mTarget && IsTargetValid() && !mDocShell) {
     gCanvasAzureMemoryUsed -= mWidth * mHeight * 4;
   }
 
-  FinishDelayedRendering();
   mTarget = nullptr;
-  mDelayedTarget = nullptr;
-  mFinalTarget = nullptr;
 
   // reset hit regions
   mHitRegionsOptions.ClearAndRetainStorage();
 
   // Since the target changes, the backing texture will change, and this will
   // no longer be valid.
   mIsEntireFrameInvalid = false;
   mPredictManyRedrawCalls = false;
@@ -1194,18 +1096,16 @@ CanvasRenderingContext2D::StyleColorToSt
     aStr.AppendFloat(nsStyleUtil::ColorComponentToFloat(NS_GET_A(aColor)));
     aStr.Append(')');
   }
 }
 
 nsresult
 CanvasRenderingContext2D::Redraw()
 {
-  RecordCommand();
-
   if (mIsEntireFrameInvalid) {
     return NS_OK;
   }
 
   mIsEntireFrameInvalid = true;
 
   if (!mCanvasElement) {
     NS_ASSERTION(mDocShell, "Redraw with no canvas element or docshell!");
@@ -1217,17 +1117,16 @@ CanvasRenderingContext2D::Redraw()
   mCanvasElement->InvalidateCanvasContent(nullptr);
 
   return NS_OK;
 }
 
 void
 CanvasRenderingContext2D::Redraw(const mgfx::Rect &r)
 {
-  RecordCommand();
   ++mInvalidateCount;
 
   if (mIsEntireFrameInvalid) {
     return;
   }
 
   if (mPredictManyRedrawCalls ||
     mInvalidateCount > kCanvasMaxInvalidateCount) {
@@ -1240,28 +1139,16 @@ CanvasRenderingContext2D::Redraw(const m
     return;
   }
 
   nsSVGEffects::InvalidateDirectRenderingObservers(mCanvasElement);
 
   mCanvasElement->InvalidateCanvasContent(&r);
 }
 
-TemporaryRef<SourceSurface>
-CanvasRenderingContext2D::GetSurfaceSnapshot(bool* aPremultAlpha /* = nullptr */)
-{
-  EnsureTarget();
-  if (aPremultAlpha) {
-    *aPremultAlpha = true;
-  }
-  FlushDelayedTarget();
-  FinishDelayedRendering();
-  return mFinalTarget->Snapshot();
-}
-
 void
 CanvasRenderingContext2D::DidRefresh()
 {
   if (IsTargetValid() && SkiaGLTex()) {
     SkiaGLGlue* glue = gfxPlatform::GetPlatform()->GetSkiaGLGlue();
     MOZ_ASSERT(glue);
 
     auto gl = glue->GetGLContext();
@@ -1269,17 +1156,16 @@ CanvasRenderingContext2D::DidRefresh()
   }
 }
 
 void
 CanvasRenderingContext2D::RedrawUser(const gfxRect& r)
 {
   if (mIsEntireFrameInvalid) {
     ++mInvalidateCount;
-    RecordCommand();
     return;
   }
 
   mgfx::Rect newr =
     mTarget->GetTransform().TransformBounds(ToRect(r));
   Redraw(newr);
 }
 
@@ -1295,17 +1181,17 @@ bool CanvasRenderingContext2D::SwitchRen
       gfxPlatform::GetPlatform()->GetSkiaGLGlue()->GetGLContext()->MakeCurrent();
       gfxPlatform::GetPlatform()->GetSkiaGLGlue()->GetGLContext()->fDeleteTextures(1, &mVideoTexture);
     }
 	  mCurrentVideoSize.width = 0;
 	  mCurrentVideoSize.height = 0;
   }
 #endif
 
-  RefPtr<SourceSurface> snapshot = GetSurfaceSnapshot();
+  RefPtr<SourceSurface> snapshot = mTarget->Snapshot();
   RefPtr<DrawTarget> oldTarget = mTarget;
   mTarget = nullptr;
   mResetLayer = true;
 
   // Recreate target using the new rendering mode
   RenderingMode attemptedMode = EnsureTarget(aRenderingMode);
   if (!IsTargetValid())
     return false;
@@ -1469,50 +1355,38 @@ CanvasRenderingContext2D::EnsureTarget(R
           gfxPlatform::GetPlatform()->UseAcceleratedSkiaCanvas() &&
           CheckSizeForSkiaGL(size)) {
         DemoteOldestContextIfNecessary();
 
 #if USE_SKIA_GPU
         SkiaGLGlue* glue = gfxPlatform::GetPlatform()->GetSkiaGLGlue();
 
         if (glue && glue->GetGrContext() && glue->GetGLContext()) {
-          mFinalTarget = Factory::CreateDrawTargetSkiaWithGrContext(glue->GetGrContext(), size, format);
+          mTarget = Factory::CreateDrawTargetSkiaWithGrContext(glue->GetGrContext(), size, format);
           if (mTarget) {
             AddDemotableContext(this);
           } else {
             printf_stderr("Failed to create a SkiaGL DrawTarget, falling back to software\n");
             mode = RenderingMode::SoftwareBackendMode;
           }
         }
 #endif
-        if (!mFinalTarget) {
-          mFinalTarget = layerManager->CreateDrawTarget(size, format);
+        if (!mTarget) {
+          mTarget = layerManager->CreateDrawTarget(size, format);
         }
       } else {
-        mFinalTarget = layerManager->CreateDrawTarget(size, format);
+        mTarget = layerManager->CreateDrawTarget(size, format);
         mode = RenderingMode::SoftwareBackendMode;
       }
      } else {
-        mFinalTarget = gfxPlatform::GetPlatform()->CreateOffscreenCanvasDrawTarget(size, format);
+        mTarget = gfxPlatform::GetPlatform()->CreateOffscreenCanvasDrawTarget(size, format);
         mode = RenderingMode::SoftwareBackendMode;
      }
   }
 
-  // Restrict async canvas drawing to OSX for now since we get test failures
-  // on other platforms.
-  if (mFinalTarget) {
-#ifdef XP_MACOSX
-    mTarget = mDelayedTarget = mFinalTarget->CreateCaptureDT(size);
-#else
-    mTarget = mFinalTarget;
-#endif
-  }
-
-  mPendingCommands = 0;
-
   if (mTarget) {
     static bool registered = false;
     if (!registered) {
       registered = true;
       RegisterStrongMemoryReporter(new Canvas2dPixelsReporter());
     }
 
     gCanvasAzureMemoryUsed += mWidth * mHeight * 4;
@@ -1536,17 +1410,17 @@ CanvasRenderingContext2D::EnsureTarget(R
     if (mCanvasElement) {
       mCanvasElement->InvalidateCanvas();
     }
     // Calling Redraw() tells our invalidation machinery that the entire
     // canvas is already invalid, which can speed up future drawing.
     Redraw();
   } else {
     EnsureErrorTarget();
-    mTarget = mFinalTarget = sErrorTarget;
+    mTarget = sErrorTarget;
   }
 
   return mode;
 }
 
 #ifdef DEBUG
 int32_t
 CanvasRenderingContext2D::GetWidth() const
@@ -1556,61 +1430,16 @@ CanvasRenderingContext2D::GetWidth() con
 
 int32_t
 CanvasRenderingContext2D::GetHeight() const
 {
   return mHeight;
 }
 #endif
 
-class DrawCaptureTask : public nsRunnable
-{
-public:
-  DrawCaptureTask(DrawTargetCapture *aReplay, DrawTarget* aDest)
-    : mReplay(aReplay)
-    , mDest(aDest)
-  {
-  }
-
-  NS_IMETHOD Run()
-  {
-    mDest->DrawCapturedDT(mReplay, Matrix());
-    return NS_OK;
-  }
-
-private:
-  RefPtr<DrawTargetCapture> mReplay;
-  RefPtr<DrawTarget> mDest;
-};
-
-void
-CanvasRenderingContext2D::FlushDelayedTarget()
-{
-  if (!mDelayedTarget) {
-    return;
-  }
-  mPendingCommands = 0;
-
-  nsCOMPtr<nsIRunnable> task = new DrawCaptureTask(mDelayedTarget, mFinalTarget);
-  mTaskQueue->Dispatch(task.forget());
-
-  mDelayedTarget = mFinalTarget->CreateCaptureDT(IntSize(mWidth, mHeight));
-
-  mDelayedTarget->SetTransform(mTarget->GetTransform());
-  mTarget = mDelayedTarget;
-}
-
-void
-CanvasRenderingContext2D::FinishDelayedRendering()
-{
-  if (mTaskQueue) {
-    mTaskQueue->AwaitIdle();
-  }
-}
-
 NS_IMETHODIMP
 CanvasRenderingContext2D::SetDimensions(int32_t width, int32_t height)
 {
   ClearTarget();
 
   // Zero sized surfaces can cause problems.
   mZero = false;
   if (height == 0) {
@@ -1750,17 +1579,17 @@ CanvasRenderingContext2D::SetContextOpti
 void
 CanvasRenderingContext2D::GetImageBuffer(uint8_t** aImageBuffer,
                                          int32_t* aFormat)
 {
   *aImageBuffer = nullptr;
   *aFormat = 0;
 
   EnsureTarget();
-  RefPtr<SourceSurface> snapshot = GetSurfaceSnapshot();
+  RefPtr<SourceSurface> snapshot = mTarget->Snapshot();
   if (!snapshot) {
     return;
   }
 
   RefPtr<DataSourceSurface> data = snapshot->GetDataSurface();
   if (!data || data->GetSize() != IntSize(mWidth, mHeight)) {
     return;
   }
@@ -2169,17 +1998,17 @@ CanvasRenderingContext2D::CreatePattern(
   }
 
   EnsureTarget();
 
   // The canvas spec says that createPattern should use the first frame
   // of animated images
   nsLayoutUtils::SurfaceFromElementResult res =
     nsLayoutUtils::SurfaceFromElement(htmlElement,
-      nsLayoutUtils::SFE_WANT_FIRST_FRAME, mFinalTarget);
+      nsLayoutUtils::SFE_WANT_FIRST_FRAME, mTarget);
 
   if (!res.mSourceSurface) {
     error.Throw(NS_ERROR_NOT_AVAILABLE);
     return nullptr;
   }
 
   nsRefPtr<CanvasPattern> pat =
     new CanvasPattern(this, res.mSourceSurface, repeatMode, res.mPrincipal,
@@ -4480,17 +4309,17 @@ CanvasRenderingContext2D::DrawImage(cons
     // The cache lookup can miss even if the image is already in the cache
     // if the image is coming from a different element or cached for a
     // different canvas. This covers the case when we miss due to caching
     // for a different canvas, but CanvasImageCache should be fixed if we
     // see misses due to different elements drawing the same image.
     nsLayoutUtils::SurfaceFromElementResult res =
       CachedSurfaceFromElement(element);
     if (!res.mSourceSurface)
-      res = nsLayoutUtils::SurfaceFromElement(element, sfeFlags, mFinalTarget);
+      res = nsLayoutUtils::SurfaceFromElement(element, sfeFlags, mTarget);
 
     if (!res.mSourceSurface && !res.mDrawInfo.mImgContainer) {
       // The spec says to silently do nothing in the following cases:
       //   - The element is still loading.
       //   - The image is bad, but it's not in the broken state (i.e., we could
       //     decode the headers and get the size).
       if (!res.mIsStillLoading && !res.mHasSize) {
         error.Throw(NS_ERROR_NOT_AVAILABLE);
@@ -4824,22 +4653,17 @@ CanvasRenderingContext2D::DrawWindow(nsG
   }
   nsRefPtr<gfxContext> thebes;
   RefPtr<DrawTarget> drawDT;
   // Rendering directly is faster and can be done if mTarget supports Azure
   // and does not need alpha blending.
   if (gfxPlatform::GetPlatform()->SupportsAzureContentForDrawTarget(mTarget) &&
       GlobalAlpha() == 1.0f)
   {
-    // Complete any async rendering and use synchronous rendering for DrawWindow
-    // until we're confident it works for all content.
-    FlushDelayedTarget();
-    FinishDelayedRendering();
-
-    thebes = new gfxContext(mFinalTarget);
+    thebes = new gfxContext(mTarget);
     thebes->SetMatrix(gfxMatrix(matrix._11, matrix._12, matrix._21,
                                 matrix._22, matrix._31, matrix._32));
   } else {
     drawDT =
       gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(IntSize(ceil(sw), ceil(sh)),
                                                                    SurfaceFormat::B8G8R8A8);
     if (!drawDT) {
       error.Throw(NS_ERROR_FAILURE);
@@ -5086,17 +4910,17 @@ CanvasRenderingContext2D::GetImageDataAr
     return NS_ERROR_DOM_SYNTAX_ERR;
   }
 
   IntRect srcRect(0, 0, mWidth, mHeight);
   IntRect destRect(aX, aY, aWidth, aHeight);
   IntRect srcReadRect = srcRect.Intersect(destRect);
   RefPtr<DataSourceSurface> readback;
   if (!srcReadRect.IsEmpty() && !mZero) {
-    RefPtr<SourceSurface> snapshot = GetSurfaceSnapshot();
+    RefPtr<SourceSurface> snapshot = mTarget->Snapshot();
     if (snapshot) {
       readback = snapshot->GetDataSurface();
     }
     if (!readback || !readback->GetData()) {
       return NS_ERROR_OUT_OF_MEMORY;
     }
   }
 
@@ -5472,17 +5296,17 @@ CanvasRenderingContext2D::GetCanvasLayer
   // layer manager which must NOT happen during a paint.
   if (!mTarget || !IsTargetValid()) {
     // No DidTransactionCallback will be received, so mark the context clean
     // now so future invalidations will be dispatched.
     MarkContextClean();
     return nullptr;
   }
 
-  FlushDelayedTarget();
+  mTarget->Flush();
 
   if (!mResetLayer && aOldLayer) {
     CanvasRenderingContext2DUserData* userData =
       static_cast<CanvasRenderingContext2DUserData*>(
         aOldLayer->GetUserData(&g2DContextLayerUserData));
 
     CanvasLayer::Data data;
 
@@ -5521,33 +5345,34 @@ CanvasRenderingContext2D::GetCanvasLayer
   // The layer will be destroyed when we tear down the presentation
   // (at the latest), at which time this userData will be destroyed,
   // releasing the reference to the element.
   // The userData will receive DidTransactionCallbacks, which flush the
   // invalidation state to indicate that the canvas is up to date.
   userData = new CanvasRenderingContext2DUserData(this);
   canvasLayer->SetDidTransactionCallback(
           CanvasRenderingContext2DUserData::DidTransactionCallback, userData);
-  canvasLayer->SetPreTransactionCallback(
-          CanvasRenderingContext2DUserData::PreTransactionCallback, userData);
   canvasLayer->SetUserData(&g2DContextLayerUserData, userData);
 
   CanvasLayer::Data data;
   data.mSize = nsIntSize(mWidth, mHeight);
   data.mHasAlpha = !mOpaque;
 
   GLuint skiaGLTex = SkiaGLTex();
   if (skiaGLTex) {
+    canvasLayer->SetPreTransactionCallback(
+            CanvasRenderingContext2DUserData::PreTransactionCallback, userData);
+
     SkiaGLGlue* glue = gfxPlatform::GetPlatform()->GetSkiaGLGlue();
     MOZ_ASSERT(glue);
 
     data.mGLContext = glue->GetGLContext();
     data.mFrontbufferGLTex = skiaGLTex;
   } else {
-    data.mDrawTarget = mFinalTarget;
+    data.mDrawTarget = mTarget;
   }
 
   canvasLayer->Initialize(data);
   uint32_t flags = mOpaque ? Layer::CONTENT_OPAQUE : 0;
   canvasLayer->SetContentFlags(flags);
   canvasLayer->Updated();
 
   mResetLayer = false;
--- a/dom/canvas/CanvasRenderingContext2D.h
+++ b/dom/canvas/CanvasRenderingContext2D.h
@@ -5,17 +5,16 @@
 #ifndef CanvasRenderingContext2D_h
 #define CanvasRenderingContext2D_h
 
 #include "mozilla/Attributes.h"
 #include <vector>
 #include "nsIDOMCanvasRenderingContext2D.h"
 #include "nsICanvasRenderingContextInternal.h"
 #include "mozilla/RefPtr.h"
-#include "mozilla/Monitor.h"
 #include "nsColor.h"
 #include "mozilla/dom/HTMLCanvasElement.h"
 #include "mozilla/dom/HTMLVideoElement.h"
 #include "CanvasUtils.h"
 #include "gfxTextRun.h"
 #include "mozilla/ErrorResult.h"
 #include "mozilla/dom/CanvasGradient.h"
 #include "mozilla/dom/CanvasRenderingContext2DBinding.h"
@@ -23,17 +22,16 @@
 #include "mozilla/gfx/Rect.h"
 #include "mozilla/gfx/2D.h"
 #include "gfx2DGlue.h"
 #include "imgIEncoder.h"
 #include "nsLayoutUtils.h"
 #include "mozilla/EnumeratedArray.h"
 #include "FilterSupport.h"
 #include "nsSVGEffects.h"
-#include "MediaTaskQueue.h"
 
 class nsGlobalWindow;
 class nsXULElement;
 
 namespace mozilla {
 namespace gl {
 class SourceSurface;
 }
@@ -49,17 +47,16 @@ class CanvasPath;
 
 extern const mozilla::gfx::Float SIGMA_MAX;
 
 template<typename T> class Optional;
 
 struct CanvasBidiProcessor;
 class CanvasRenderingContext2DUserData;
 class CanvasDrawObserver;
-class CanvasShutdownObserver;
 
 /**
  ** CanvasRenderingContext2D
  **/
 class CanvasRenderingContext2D final :
   public nsICanvasRenderingContextInternal,
   public nsWrapperCache
 {
@@ -440,17 +437,24 @@ public:
   }
   NS_IMETHOD SetDimensions(int32_t width, int32_t height) override;
   NS_IMETHOD InitializeWithSurface(nsIDocShell *shell, gfxASurface *surface, int32_t width, int32_t height) override;
 
   NS_IMETHOD GetInputStream(const char* aMimeType,
                             const char16_t* aEncoderOptions,
                             nsIInputStream **aStream) override;
 
-  mozilla::TemporaryRef<mozilla::gfx::SourceSurface> GetSurfaceSnapshot(bool* aPremultAlpha = nullptr) override;
+  mozilla::TemporaryRef<mozilla::gfx::SourceSurface> GetSurfaceSnapshot(bool* aPremultAlpha = nullptr) override
+  {
+    EnsureTarget();
+    if (aPremultAlpha) {
+      *aPremultAlpha = true;
+    }
+    return mTarget->Snapshot();
+  }
 
   NS_IMETHOD SetIsOpaque(bool isOpaque) override;
   bool GetIsOpaque() override { return mOpaque; }
   NS_IMETHOD Reset() override;
   already_AddRefed<CanvasLayer> GetCanvasLayer(nsDisplayListBuilder* aBuilder,
                                                CanvasLayer *aOldLayer,
                                                LayerManager *aManager) override;
   virtual bool ShouldForceInactiveLayer(LayerManager *aManager) override;
@@ -512,43 +516,27 @@ public:
       mozilla::gfx::Matrix transform = mTarget->GetTransform();
       mDSPathBuilder->BezierTo(transform * aCP1,
                                 transform * aCP2,
                                 transform * aCP3);
     }
   }
 
   friend class CanvasRenderingContext2DUserData;
-  friend class CanvasShutdownObserver;
 
   virtual void GetImageBuffer(uint8_t** aImageBuffer, int32_t* aFormat) override;
 
 
   // Given a point, return hit region ID if it exists
   nsString GetHitRegion(const mozilla::gfx::Point& aPoint) override;
 
 
   // return true and fills in the bound rect if element has a hit region.
   bool GetHitRegionRect(Element* aElement, nsRect& aRect) override;
 
-  /**
-   * Deferred rendering functions
-   */
-
-  /**
-   * Called when the event loop reaches a stable
-   * state, and trigger us to flush any outstanding
-   * commands to the rendering thread.
-   */
-  void StableStateReached()
-  {
-    mScheduledFlush = false;
-    FlushDelayedTarget();
-  }
-
 protected:
   nsresult GetImageDataArray(JSContext* aCx, int32_t aX, int32_t aY,
                              uint32_t aWidth, uint32_t aHeight,
                              JSObject** aRetval);
 
   nsresult PutImageData_explicit(int32_t x, int32_t y, uint32_t w, uint32_t h,
                                  dom::Uint8ClampedArray* aArray,
                                  bool hasDirtyRect, int32_t dirtyX, int32_t dirtyY,
@@ -557,18 +545,16 @@ protected:
   /**
    * Internal method to complete initialisation, expects mTarget to have been set
    */
   nsresult Initialize(int32_t width, int32_t height);
 
   nsresult InitializeWithTarget(mozilla::gfx::DrawTarget *surface,
                                 int32_t width, int32_t height);
 
-  void ShutdownTaskQueue();
-
   /**
     * The number of living nsCanvasRenderingContexts.  When this goes down to
     * 0, we free the premultiply and unpremultiply tables, if they exist.
     */
   static uint32_t sNumLivingContexts;
 
   /**
     * Lookup table used to speed up GetImageData().
@@ -722,64 +708,16 @@ protected:
   // If mCanvasElement is not provided, then a docshell is
   nsCOMPtr<nsIDocShell> mDocShell;
 
   // This is created lazily so it is necessary to call EnsureTarget before
   // accessing it. In the event of an error it will be equal to
   // sErrorTarget.
   mozilla::RefPtr<mozilla::gfx::DrawTarget> mTarget;
 
-  /**
-   * Deferred rendering implementation
-   */
-
-  // If we are using deferred rendering, then this is the current
-  // deferred rendering target. It is the same pointer as mTarget.
-  mozilla::RefPtr<mozilla::gfx::DrawTargetCapture> mDelayedTarget;
-
-  // If we are using deferred rendering, then this is the actual destination
-  // buffer.
-  mozilla::RefPtr<mozilla::gfx::DrawTarget> mFinalTarget;
-
-  /**
-   * Add the current DelayedDrawTarget to the rendering queue,
-   * schedule a rendering job if required, and create a new
-   * DelayedDrawTarget.
-   */
-  void FlushDelayedTarget();
-
-  /**
-   * Make sure all commands have been flushed to
-   * the rendering thread, and block until they
-   * are completed.
-   */
-  void FinishDelayedRendering();
-
-  /**
-   * Called when a command is added to the current
-   * delayed draw target.
-   *
-   * Either flushes the current batch of commands to
-   * the rendering thread, or ensures that this happens
-   * the next time the event loop reaches a stable state.
-   */
-  void RecordCommand();
-
-  // The number of commands currently waiting to be sent
-  // to the rendering thread.
-  uint32_t mPendingCommands;
-
-  // True if we have scheduled FlushDelayedTarget to be
-  // called in the next browser stable state.
-  bool mScheduledFlush;
-
-  nsRefPtr<MediaTaskQueue> mTaskQueue;
-
-  nsRefPtr<CanvasShutdownObserver> mShutdownObserver;
-
   uint32_t SkiaGLTex() const;
 
   // This observes our draw calls at the beginning of the canvas
   // lifetime and switches to software or GPU mode depending on
   // what it thinks is best
   CanvasDrawObserver* mDrawObserver;
   void RemoveDrawObserver();
 
--- a/dom/canvas/WebGLElementArrayCache.cpp
+++ b/dom/canvas/WebGLElementArrayCache.cpp
@@ -371,17 +371,17 @@ WebGLElementArrayCacheTree<T>::Update(si
         size_t numLeavesNonPOT = (numberOfElements + kElementsPerLeaf - 1) / kElementsPerLeaf;
         // It only remains to round that up to the next power of two:
         requiredNumLeaves = RoundUpPow2(numLeavesNonPOT);
     }
 
     // Step #0: If needed, resize our tree data storage.
     if (requiredNumLeaves != NumLeaves()) {
         // See class comment for why the tree storage size is 2 * numLeaves.
-        if (!mTreeData.SetLength(2 * requiredNumLeaves)) {
+        if (!mTreeData.SetLength(2 * requiredNumLeaves, fallible)) {
             mTreeData.SetLength(0);
             return false;
         }
         MOZ_ASSERT(NumLeaves() == requiredNumLeaves);
 
         if (NumLeaves()) {
             // When resizing, update the whole tree, not just the subset
             // corresponding to the part of the buffer being updated.
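The sizing logic in the hunk above is a ceiling division followed by rounding up to the next power of two. A standalone sketch of the same arithmetic, assuming kElementsPerLeaf = 16 purely for illustration (the real constant and RoundUpPow2 helper live in this file):

#include <stdint.h>

// Sketch of the leaf sizing done in Update() above.
uint32_t
RequiredNumLeaves(uint32_t numberOfElements)
{
    const uint32_t kElementsPerLeaf = 16;  // assumed value, for illustration only
    // Ceiling division: 1000 elements -> 63 leaves.
    uint32_t numLeavesNonPOT =
        (numberOfElements + kElementsPerLeaf - 1) / kElementsPerLeaf;
    // Round up to the next power of two: 63 -> 64. The tree storage is then
    // 2 * requiredNumLeaves entries, grown fallibly via SetLength(..., fallible).
    uint32_t requiredNumLeaves = 1;
    while (requiredNumLeaves < numLeavesNonPOT) {
        requiredNumLeaves <<= 1;
    }
    return requiredNumLeaves;
}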
@@ -465,17 +465,17 @@ WebGLElementArrayCache::WebGLElementArra
 WebGLElementArrayCache::~WebGLElementArrayCache()
 {
 }
 
 bool
 WebGLElementArrayCache::BufferData(const void* ptr, size_t byteLength)
 {
     if (mBytes.Length() != byteLength) {
-        if (!mBytes.SetLength(byteLength)) {
+        if (!mBytes.SetLength(byteLength, fallible)) {
             mBytes.SetLength(0);
             return false;
         }
     }
     MOZ_ASSERT(mBytes.Length() == byteLength);
     return BufferSubData(0, ptr, byteLength);
 }
 
--- a/dom/crypto/CryptoBuffer.cpp
+++ b/dom/crypto/CryptoBuffer.cpp
@@ -12,23 +12,24 @@
 namespace mozilla {
 namespace dom {
 
 uint8_t*
 CryptoBuffer::Assign(const CryptoBuffer& aData)
 {
   // Same as in nsTArray_Impl::operator=, but return the value
   // returned from ReplaceElementsAt to enable OOM detection
-  return ReplaceElementsAt(0, Length(), aData.Elements(), aData.Length());
+  return ReplaceElementsAt(0, Length(), aData.Elements(), aData.Length(),
+                           fallible);
 }
 
 uint8_t*
 CryptoBuffer::Assign(const uint8_t* aData, uint32_t aLength)
 {
-  return ReplaceElementsAt(0, Length(), aData, aLength);
+  return ReplaceElementsAt(0, Length(), aData, aLength, fallible);
 }
 
 uint8_t*
 CryptoBuffer::Assign(const SECItem* aItem)
 {
   MOZ_ASSERT(aItem);
   return Assign(aItem->data, aItem->len);
 }
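Because ReplaceElementsAt is now fallible, CryptoBuffer::Assign signals OOM through a null return. A caller-side sketch of the check this enables; the function name and error handling are illustrative:

// Sketch only; assumes CryptoBuffer.h and nsError.h.
nsresult
CopyIntoBuffer(const CryptoBuffer& aSource, CryptoBuffer& aDest)
{
  // Assign returns the new element pointer, or nullptr if the fallible
  // ReplaceElementsAt could not allocate.
  if (!aDest.Assign(aSource)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  return NS_OK;
}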
--- a/dom/crypto/WebCryptoCommon.h
+++ b/dom/crypto/WebCryptoCommon.h
@@ -147,17 +147,17 @@ ReadBuffer(JSStructuredCloneReader* aRea
 {
   uint32_t length, zero;
   bool ret = JS_ReadUint32Pair(aReader, &length, &zero);
   if (!ret) {
     return false;
   }
 
   if (length > 0) {
-    if (!aBuffer.SetLength(length)) {
+    if (!aBuffer.SetLength(length, fallible)) {
       return false;
     }
     ret = JS_ReadBytes(aReader, aBuffer.Elements(), aBuffer.Length());
   }
   return ret;
 }
 
 inline bool
--- a/dom/crypto/WebCryptoTask.cpp
+++ b/dom/crypto/WebCryptoTask.cpp
@@ -550,17 +550,17 @@ private:
                                               CKA_ENCRYPT, &keyItem, nullptr));
     if (!symKey) {
       return NS_ERROR_DOM_INVALID_ACCESS_ERR;
     }
 
     // Initialize the output buffer (enough space for padding / a full tag)
     uint32_t dataLen = mData.Length();
     uint32_t maxLen = dataLen + 16;
-    if (!mResult.SetLength(maxLen)) {
+    if (!mResult.SetLength(maxLen, fallible)) {
       return NS_ERROR_DOM_UNKNOWN_ERR;
     }
     uint32_t outLen = 0;
 
     // Perform the encryption/decryption
     if (mEncrypt) {
       rv = MapSECStatus(PK11_Encrypt(symKey.get(), mMechanism, &param,
                                      mResult.Elements(), &outLen, maxLen,
@@ -674,17 +674,17 @@ private:
                                                    PK11_OriginUnwrap, fakeOperation,
                                                    &dataItem, nullptr));
       if (!keyToWrap) {
         return NS_ERROR_DOM_OPERATION_ERR;
       }
 
       // Encrypt and return the wrapped key
       // AES-KW encryption results in a wrapped key 64 bits longer
-      if (!mResult.SetLength(mData.Length() + 8)) {
+      if (!mResult.SetLength(mData.Length() + 8, fallible)) {
         return NS_ERROR_DOM_OPERATION_ERR;
       }
       SECItem resultItem = {siBuffer, mResult.Elements(),
                             (unsigned int) mResult.Length()};
       rv = MapSECStatus(PK11_WrapSymKey(mMechanism, nullptr, symKey.get(),
                                         keyToWrap.get(), &resultItem));
       NS_ENSURE_SUCCESS(rv, NS_ERROR_DOM_OPERATION_ERR);
     } else {
@@ -806,17 +806,17 @@ private:
     nsresult rv;
 
     if (!mDataIsSet) {
       return NS_ERROR_DOM_OPERATION_ERR;
     }
 
     // Ciphertext is an integer mod the modulus, so it will be
     // no longer than mStrength octets
-    if (!mResult.SetLength(mStrength)) {
+    if (!mResult.SetLength(mStrength, fallible)) {
       return NS_ERROR_DOM_UNKNOWN_ERR;
     }
 
     CK_RSA_PKCS_OAEP_PARAMS oaepParams;
     oaepParams.source = CKZ_DATA_SPECIFIED;
 
     oaepParams.pSourceData = mLabel.Length() ? mLabel.Elements() : nullptr;
     oaepParams.ulSourceDataLen = mLabel.Length();
@@ -896,17 +896,17 @@ private:
   CryptoBuffer mData;
   CryptoBuffer mSignature;
   CryptoBuffer mResult;
   bool mSign;
 
   virtual nsresult DoCrypto() override
   {
     // Initialize the output buffer
-    if (!mResult.SetLength(HASH_LENGTH_MAX)) {
+    if (!mResult.SetLength(HASH_LENGTH_MAX, fallible)) {
       return NS_ERROR_DOM_UNKNOWN_ERR;
     }
 
     ScopedPLArenaPool arena(PORT_NewArena(DER_DEFAULT_CHUNKSIZE));
     if (!arena) {
       return NS_ERROR_DOM_OPERATION_ERR;
     }
 
@@ -1178,17 +1178,17 @@ public:
 private:
   SECOidTag mOidTag;
   CryptoBuffer mData;
 
   virtual nsresult DoCrypto() override
   {
     // Resize the result buffer
     uint32_t hashLen = HASH_ResultLenByOidTag(mOidTag);
-    if (!mResult.SetLength(hashLen)) {
+    if (!mResult.SetLength(hashLen, fallible)) {
       return NS_ERROR_DOM_UNKNOWN_ERR;
     }
 
     // Compute the hash
     nsresult rv = MapSECStatus(PK11_HashBuf(mOidTag, mResult.Elements(),
                                             mData.Elements(), mData.Length()));
     if (NS_FAILED(rv)) {
       return NS_ERROR_DOM_UNKNOWN_ERR;
@@ -2592,17 +2592,17 @@ private:
     // just refers to a buffer managed by symKey. The assignment copies the
     // data, so mResult manages one copy, while symKey manages another.
     ATTEMPT_BUFFER_ASSIGN(mResult, PK11_GetKeyData(symKey));
 
     if (mLength > mResult.Length()) {
       return NS_ERROR_DOM_DATA_ERR;
     }
 
-    if (!mResult.SetLength(mLength)) {
+    if (!mResult.SetLength(mLength, fallible)) {
       return NS_ERROR_DOM_UNKNOWN_ERR;
     }
 
     return NS_OK;
   }
 };
 
 class DeriveDhBitsTask : public ReturnArrayBufferViewTask
@@ -2691,17 +2691,17 @@ private:
     // just refers to a buffer managed by symKey. The assignment copies the
     // data, so mResult manages one copy, while symKey manages another.
     ATTEMPT_BUFFER_ASSIGN(mResult, PK11_GetKeyData(symKey));
 
     if (mLength > mResult.Length()) {
       return NS_ERROR_DOM_DATA_ERR;
     }
 
-    if (!mResult.SetLength(mLength)) {
+    if (!mResult.SetLength(mLength, fallible)) {
       return NS_ERROR_DOM_UNKNOWN_ERR;
     }
 
     return NS_OK;
   }
 };
 
 template<class KeyEncryptTask>
--- a/dom/indexedDB/ActorsParent.cpp
+++ b/dom/indexedDB/ActorsParent.cpp
@@ -2711,17 +2711,18 @@ InsertIndexDataValuesFunction::OnFunctio
 
   Key value;
   rv = value.SetFromValueArray(aValues, 3);
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
   }
 
   // Update the array with the new addition.
-  if (NS_WARN_IF(!indexValues.SetCapacity(indexValues.Length() + 1))) {
+  if (NS_WARN_IF(!indexValues.SetCapacity(indexValues.Length() + 1,
+                                          fallible))) {
     IDB_REPORT_INTERNAL_ERR();
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   MOZ_ALWAYS_TRUE(
     indexValues.InsertElementSorted(IndexDataValue(indexId, !!unique, value)));
 
   // Compress the array.
@@ -8103,24 +8104,25 @@ ConvertBlobsToActors(PBackgroundParent* 
   MOZ_ASSERT(exists);
 
   DebugOnly<bool> isDirectory;
   MOZ_ASSERT(NS_SUCCEEDED(directory->IsDirectory(&isDirectory)));
   MOZ_ASSERT(isDirectory);
 
   const uint32_t count = aFiles.Length();
 
-  if (NS_WARN_IF(!aActors.SetCapacity(count))) {
+  if (NS_WARN_IF(!aActors.SetCapacity(count, fallible))) {
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   const bool collectFileInfos =
     !BackgroundParent::IsOtherProcessActor(aBackgroundActor);
 
-  if (collectFileInfos && NS_WARN_IF(!aFileInfos.SetCapacity(count))) {
+  if (collectFileInfos &&
+      NS_WARN_IF(!aFileInfos.SetCapacity(count, fallible))) {
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   for (uint32_t index = 0; index < count; index++) {
     const StructuredCloneFile& file = aFiles[index];
 
     const int64_t fileId = file.mFileInfo->Id();
     MOZ_ASSERT(fileId > 0);
@@ -11574,17 +11576,17 @@ Database::Invalidate()
       AssertIsOnBackgroundThread();
 
       const uint32_t count = aTable.Count();
       if (!count) {
         return true;
       }
 
       FallibleTArray<nsRefPtr<TransactionBase>> transactions;
-      if (NS_WARN_IF(!transactions.SetCapacity(count))) {
+      if (NS_WARN_IF(!transactions.SetCapacity(count, fallible))) {
         return false;
       }
 
       aTable.EnumerateEntries(Collect, &transactions);
 
       if (NS_WARN_IF(transactions.Length() != count)) {
         return false;
       }
@@ -11922,17 +11924,17 @@ Database::AllocPBackgroundIDBTransaction
   const uint32_t nameCount = aObjectStoreNames.Length();
 
   if (NS_WARN_IF(nameCount > objectStores.Count())) {
     ASSERT_UNLESS_FUZZING();
     return nullptr;
   }
 
   FallibleTArray<nsRefPtr<FullObjectStoreMetadata>> fallibleObjectStores;
-  if (NS_WARN_IF(!fallibleObjectStores.SetCapacity(nameCount))) {
+  if (NS_WARN_IF(!fallibleObjectStores.SetCapacity(nameCount, fallible))) {
     return nullptr;
   }
 
   for (uint32_t nameIndex = 0; nameIndex < nameCount; nameIndex++) {
     const nsString& name = aObjectStoreNames[nameIndex];
 
     if (nameIndex) {
       // Make sure that this name is sorted properly and not a duplicate.
@@ -15764,17 +15766,17 @@ DatabaseOperationBase::GetStructuredClon
 
   size_t uncompressedLength;
   if (NS_WARN_IF(!snappy::GetUncompressedLength(compressed, compressedLength,
                                                 &uncompressedLength))) {
     return NS_ERROR_FILE_CORRUPTED;
   }
 
   AutoFallibleTArray<uint8_t, 512> uncompressed;
-  if (NS_WARN_IF(!uncompressed.SetLength(uncompressedLength))) {
+  if (NS_WARN_IF(!uncompressed.SetLength(uncompressedLength, fallible))) {
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   char* uncompressedBuffer = reinterpret_cast<char*>(uncompressed.Elements());
 
   if (NS_WARN_IF(!snappy::RawUncompress(compressed, compressedLength,
                                         uncompressedBuffer))) {
     return NS_ERROR_FILE_CORRUPTED;
@@ -15951,17 +15953,17 @@ DatabaseOperationBase::IndexDataValuesFr
                  js::ProfileEntry::Category::STORAGE);
 
   const uint32_t count = aUpdateInfos.Length();
 
   if (!count) {
     return NS_OK;
   }
 
-  if (NS_WARN_IF(!aIndexValues.SetCapacity(count))) {
+  if (NS_WARN_IF(!aIndexValues.SetCapacity(count, fallible))) {
     IDB_REPORT_INTERNAL_ERR();
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   for (uint32_t idxIndex = 0; idxIndex < count; idxIndex++) {
     const IndexUpdateInfo& updateInfo = aUpdateInfos[idxIndex];
     const int64_t& indexId = updateInfo.indexId();
     const Key& key = updateInfo.value();
@@ -20385,17 +20387,17 @@ UpdateIndexDataValuesFunction::OnFunctio
     return rv;
   }
 
   const bool hadPreviousIndexValues = !indexValues.IsEmpty();
 
   const uint32_t updateInfoCount = updateInfos.Length();
 
   if (NS_WARN_IF(!indexValues.SetCapacity(indexValues.Length() +
-                                          updateInfoCount))) {
+                                          updateInfoCount, fallible))) {
     IDB_REPORT_INTERNAL_ERR();
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   // First construct the full list to update the index_data_values row.
   for (uint32_t index = 0; index < updateInfoCount; index++) {
     const IndexUpdateInfo& info = updateInfos[index];
 
@@ -21140,17 +21142,17 @@ ObjectStoreAddOrPutRequestOp::Init(Trans
   }
 #endif
 
   const nsTArray<DatabaseFileOrMutableFileId>& files = mParams.files();
 
   if (!files.IsEmpty()) {
     const uint32_t count = files.Length();
 
-    if (NS_WARN_IF(!mStoredFileInfos.SetCapacity(count))) {
+    if (NS_WARN_IF(!mStoredFileInfos.SetCapacity(count, fallible))) {
       return false;
     }
 
     nsRefPtr<FileManager> fileManager =
       aTransaction->GetDatabase()->GetFileManager();
     MOZ_ASSERT(fileManager);
 
     for (uint32_t index = 0; index < count; index++) {
@@ -21749,17 +21751,18 @@ ObjectStoreGetRequestOp::GetResponse(Req
 {
   MOZ_ASSERT_IF(mLimit, mResponse.Length() <= mLimit);
 
   if (mGetAll) {
     aResponse = ObjectStoreGetAllResponse();
 
     if (!mResponse.IsEmpty()) {
       FallibleTArray<SerializedStructuredCloneReadInfo> fallibleCloneInfos;
-      if (NS_WARN_IF(!fallibleCloneInfos.SetLength(mResponse.Length()))) {
+      if (NS_WARN_IF(!fallibleCloneInfos.SetLength(mResponse.Length(),
+                                                   fallible))) {
         aResponse = NS_ERROR_OUT_OF_MEMORY;
         return;
       }
 
       for (uint32_t count = mResponse.Length(), index = 0;
            index < count;
            index++) {
         nsresult rv = ConvertResponse(index, fallibleCloneInfos[index]);
@@ -22297,17 +22300,18 @@ IndexGetRequestOp::GetResponse(RequestRe
 {
   MOZ_ASSERT_IF(!mGetAll, mResponse.Length() <= 1);
 
   if (mGetAll) {
     aResponse = IndexGetAllResponse();
 
     if (!mResponse.IsEmpty()) {
       FallibleTArray<SerializedStructuredCloneReadInfo> fallibleCloneInfos;
-      if (NS_WARN_IF(!fallibleCloneInfos.SetLength(mResponse.Length()))) {
+      if (NS_WARN_IF(!fallibleCloneInfos.SetLength(mResponse.Length(),
+                                                   fallible))) {
         aResponse = NS_ERROR_OUT_OF_MEMORY;
         return;
       }
 
       for (uint32_t count = mResponse.Length(), index = 0;
            index < count;
            index++) {
         StructuredCloneReadInfo& info = mResponse[index];
--- a/dom/indexedDB/IDBObjectStore.cpp
+++ b/dom/indexedDB/IDBObjectStore.cpp
@@ -1173,17 +1173,18 @@ IDBObjectStore::AddOrPut(JSContext* aCx,
   nsTArray<IndexUpdateInfo> updateInfo;
 
   aRv = GetAddInfo(aCx, value, aKey, cloneWriteInfo, key, updateInfo);
   if (aRv.Failed()) {
     return nullptr;
   }
 
   FallibleTArray<uint8_t> cloneData;
-  if (NS_WARN_IF(!cloneData.SetLength(cloneWriteInfo.mCloneBuffer.nbytes()))) {
+  if (NS_WARN_IF(!cloneData.SetLength(cloneWriteInfo.mCloneBuffer.nbytes(),
+                                      fallible))) {
     aRv = NS_ERROR_OUT_OF_MEMORY;
     return nullptr;
   }
 
   // XXX Remove this
   memcpy(cloneData.Elements(), cloneWriteInfo.mCloneBuffer.data(),
          cloneWriteInfo.mCloneBuffer.nbytes());
 
@@ -1201,17 +1202,17 @@ IDBObjectStore::AddOrPut(JSContext* aCx,
     cloneWriteInfo.mBlobOrFileInfos;
 
   FallibleTArray<nsRefPtr<FileInfo>> fileInfosToKeepAlive;
 
   if (!blobOrFileInfos.IsEmpty()) {
     const uint32_t count = blobOrFileInfos.Length();
 
     FallibleTArray<DatabaseFileOrMutableFileId> fileActorOrMutableFileIds;
-    if (NS_WARN_IF(!fileActorOrMutableFileIds.SetCapacity(count))) {
+    if (NS_WARN_IF(!fileActorOrMutableFileIds.SetCapacity(count, fallible))) {
       aRv = NS_ERROR_OUT_OF_MEMORY;
       return nullptr;
     }
 
     IDBDatabase* database = mTransaction->Database();
 
     for (uint32_t index = 0; index < count; index++) {
       StructuredCloneWriteInfo::BlobOrFileInfo& blobOrFileInfo =
--- a/dom/ipc/Blob.cpp
+++ b/dom/ipc/Blob.cpp
@@ -963,17 +963,17 @@ CreateBlobImpl(const nsTArray<BlobData>&
     DebugOnly<bool> isMutable;
     MOZ_ASSERT(NS_SUCCEEDED(blobImpl->GetMutable(&isMutable)));
     MOZ_ASSERT(!isMutable);
 
     return blobImpl.forget();
   }
 
   FallibleTArray<nsRefPtr<BlobImpl>> fallibleBlobImpls;
-  if (NS_WARN_IF(!fallibleBlobImpls.SetLength(aBlobDatas.Length()))) {
+  if (NS_WARN_IF(!fallibleBlobImpls.SetLength(aBlobDatas.Length(), fallible))) {
     return nullptr;
   }
 
   nsTArray<nsRefPtr<BlobImpl>> blobImpls;
   fallibleBlobImpls.SwapElements(blobImpls);
 
   const bool hasRecursed = aMetadata.mHasRecursed;
   aMetadata.mHasRecursed = true;
--- a/dom/media/CanvasCaptureMediaStream.cpp
+++ b/dom/media/CanvasCaptureMediaStream.cpp
@@ -221,17 +221,17 @@ public:
     RefPtr<CairoImage> image = new layers::CairoImage();
     image->SetData(imageData);
 
     SetImage(image);
     return NS_OK;
   }
 
   NS_IMETHODIMP
-  Notify(nsITimer* aTimer)
+  Notify(nsITimer* aTimer) override
   {
     nsresult rv = TakeSnapshot();
     if (NS_FAILED(rv)) {
       aTimer->Cancel();
     }
     return rv;
   }
 
--- a/dom/media/DecodedStream.cpp
+++ b/dom/media/DecodedStream.cpp
@@ -179,9 +179,35 @@ OutputStreamData::~OutputStreamData()
 void
 OutputStreamData::Init(MediaDecoder* aDecoder, ProcessedMediaStream* aStream)
 {
   mStream = aStream;
   mListener = new OutputStreamListener(aDecoder, aStream);
   aStream->AddListener(mListener);
 }
 
+DecodedStreamData*
+DecodedStream::GetData()
+{
+  return mData.get();
+}
+
+void
+DecodedStream::DestroyData()
+{
+  mData = nullptr;
+}
+
+void
+DecodedStream::RecreateData(MediaDecoder* aDecoder, int64_t aInitialTime,
+                            SourceMediaStream* aStream)
+{
+  MOZ_ASSERT(!mData);
+  mData.reset(new DecodedStreamData(aDecoder, aInitialTime, aStream));
+}
+
+nsTArray<OutputStreamData>&
+DecodedStream::OutputStreams()
+{
+  return mOutputStreams;
+}
+
 } // namespace mozilla
--- a/dom/media/DecodedStream.h
+++ b/dom/media/DecodedStream.h
@@ -3,16 +3,18 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef DecodedStream_h_
 #define DecodedStream_h_
 
 #include "nsRefPtr.h"
+#include "nsTArray.h"
+#include "mozilla/UniquePtr.h"
 #include "mozilla/gfx/Point.h"
 
 namespace mozilla {
 
 class MediaDecoder;
 class MediaInputPort;
 class SourceMediaStream;
 class ProcessedMediaStream;
@@ -89,11 +91,25 @@ public:
   ~OutputStreamData();
   void Init(MediaDecoder* aDecoder, ProcessedMediaStream* aStream);
   nsRefPtr<ProcessedMediaStream> mStream;
   // mPort connects DecodedStreamData::mStream to our mStream.
   nsRefPtr<MediaInputPort> mPort;
   nsRefPtr<OutputStreamListener> mListener;
 };
 
+class DecodedStream {
+public:
+  DecodedStreamData* GetData();
+  void DestroyData();
+  void RecreateData(MediaDecoder* aDecoder, int64_t aInitialTime,
+                    SourceMediaStream* aStream);
+  nsTArray<OutputStreamData>& OutputStreams();
+
+private:
+  UniquePtr<DecodedStreamData> mData;
+  // Data about MediaStreams that are being fed by the decoder.
+  nsTArray<OutputStreamData> mOutputStreams;
+};
+
 } // namespace mozilla
 
 #endif // DecodedStream_h_
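DecodedStream introduced above is a thin owner for the DecodedStreamData and the output-stream array that MediaDecoder previously held directly. A sketch of how an owner drives it, mirroring the accessor calls in the MediaDecoder.cpp hunks further down; the free function and its parameters are placeholders:

// Sketch only: aDecoder, aStartTimeUSecs and aGraph stand in for the real
// decoder state; DecodedStream is the class added above.
void
ResetDecodedStream(DecodedStream& aDecodedStream, MediaDecoder* aDecoder,
                   int64_t aStartTimeUSecs, MediaStreamGraph* aGraph)
{
  aDecodedStream.DestroyData();  // drop the old DecodedStreamData
  aDecodedStream.RecreateData(aDecoder, aStartTimeUSecs,
                              aGraph->CreateSourceStream(nullptr));

  // Every registered output stream then gets reconnected to the new data
  // (MediaDecoder does this via ConnectDecodedStreamToOutputStream).
  nsTArray<OutputStreamData>& outputStreams = aDecodedStream.OutputStreams();
  for (size_t i = 0; i < outputStreams.Length(); ++i) {
    OutputStreamData& os = outputStreams[i];
    MOZ_ASSERT(!os.mStream->IsDestroyed());
  }
}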
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -1,13 +1,14 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
 #include "MediaData.h"
 #include "MediaInfo.h"
 #ifdef MOZ_OMX_DECODER
 #include "GrallocImages.h"
 #include "mozilla/layers/TextureClient.h"
 #endif
 #include "VideoUtils.h"
 #include "ImageContainer.h"
@@ -500,18 +501,20 @@ MediaRawData::MediaRawData(const uint8_t
   , mSize(0)
   , mCrypto(mCryptoInternal)
   , mBuffer(new MediaLargeByteBuffer(RAW_DATA_DEFAULT_SIZE))
   , mPadding(0)
 {
   if (!EnsureCapacity(aSize)) {
     return;
   }
-  mBuffer->AppendElements(aData, aSize);
-  mBuffer->AppendElements(RAW_DATA_ALIGNMENT);
+
+  // We ensure sufficient capacity above so this shouldn't fail.
+  MOZ_ALWAYS_TRUE(mBuffer->AppendElements(aData, aSize));
+  MOZ_ALWAYS_TRUE(mBuffer->AppendElements(RAW_DATA_ALIGNMENT));
   mSize = aSize;
 }
 
 already_AddRefed<MediaRawData>
 MediaRawData::Clone() const
 {
   nsRefPtr<MediaRawData> s = new MediaRawData;
   s->mTimecode = mTimecode;
@@ -520,47 +523,50 @@ MediaRawData::Clone() const
   s->mOffset = mOffset;
   s->mKeyframe = mKeyframe;
   s->mExtraData = mExtraData;
   s->mCryptoInternal = mCryptoInternal;
   if (mSize) {
     if (!s->EnsureCapacity(mSize)) {
       return nullptr;
     }
-    s->mBuffer->AppendElements(mData, mSize);
-    s->mBuffer->AppendElements(RAW_DATA_ALIGNMENT);
+
+    // We ensure sufficient capacity above so this shouldn't fail.
+    MOZ_ALWAYS_TRUE(s->mBuffer->AppendElements(mData, mSize));
+    MOZ_ALWAYS_TRUE(s->mBuffer->AppendElements(RAW_DATA_ALIGNMENT));
     s->mSize = mSize;
   }
   return s.forget();
 }
 
 bool
 MediaRawData::EnsureCapacity(size_t aSize)
 {
   if (mData && mBuffer->Capacity() >= aSize + RAW_DATA_ALIGNMENT * 2) {
     return true;
   }
-  if (!mBuffer->SetCapacity(aSize + RAW_DATA_ALIGNMENT * 2)) {
+  if (!mBuffer->SetCapacity(aSize + RAW_DATA_ALIGNMENT * 2, fallible)) {
     return false;
   }
   // Find alignment address.
   const uintptr_t alignmask = RAW_DATA_ALIGNMENT;
   mData = reinterpret_cast<uint8_t*>(
     (reinterpret_cast<uintptr_t>(mBuffer->Elements()) + alignmask) & ~alignmask);
   MOZ_ASSERT(uintptr_t(mData) % (RAW_DATA_ALIGNMENT+1) == 0);
 
   // Shift old data according to new padding.
   uint32_t oldpadding = int32_t(mPadding);
   mPadding = mData - mBuffer->Elements();
   int32_t shift = int32_t(mPadding) - int32_t(oldpadding);
 
   if (shift == 0) {
     // Nothing to do.
   } else if (shift > 0) {
-    mBuffer->InsertElementsAt(oldpadding, shift);
+    // We ensure sufficient capacity above so this shouldn't fail.
+    MOZ_ALWAYS_TRUE(mBuffer->InsertElementsAt(oldpadding, shift, fallible));
   } else {
     mBuffer->RemoveElementsAt(mPadding, -shift);
   }
   return true;
 }
 
 MediaRawData::~MediaRawData()
 {
@@ -611,41 +617,49 @@ MediaRawDataWriter::EnsureSize(size_t aS
 }
 
 bool
 MediaRawDataWriter::SetSize(size_t aSize)
 {
   if (aSize > mTarget->mSize && !EnsureSize(aSize)) {
     return false;
   }
-  // Pad our buffer.
-  mBuffer->SetLength(aSize + mTarget->mPadding + RAW_DATA_ALIGNMENT);
+
+  // Pad our buffer. We ensure sufficient capacity above so this shouldn't fail.
+  MOZ_ALWAYS_TRUE(
+    mBuffer->SetLength(aSize + mTarget->mPadding + RAW_DATA_ALIGNMENT,
+                       fallible));
   mTarget->mSize = mSize = aSize;
   return true;
 }
 
 bool
 MediaRawDataWriter::Prepend(const uint8_t* aData, size_t aSize)
 {
   if (!EnsureSize(aSize + mTarget->mSize)) {
     return false;
   }
-  mBuffer->InsertElementsAt(mTarget->mPadding, aData, aSize);
+
+  // We ensure sufficient capacity above so this shouldn't fail.
+  MOZ_ALWAYS_TRUE(mBuffer->InsertElementsAt(mTarget->mPadding, aData, aSize));
   mTarget->mSize += aSize;
   mSize = mTarget->mSize;
   return true;
 }
 
 bool
 MediaRawDataWriter::Replace(const uint8_t* aData, size_t aSize)
 {
   if (!EnsureSize(aSize)) {
     return false;
   }
-  mBuffer->ReplaceElementsAt(mTarget->mPadding, mTarget->mSize, aData, aSize);
+
+  // We ensure sufficient capacity above so this shouldn't fail.
+  MOZ_ALWAYS_TRUE(mBuffer->ReplaceElementsAt(mTarget->mPadding, mTarget->mSize,
+                                             aData, aSize, fallible));
   mTarget->mSize = mSize = aSize;
   return true;
 }
 
 void
 MediaRawDataWriter::Clear()
 {
   mBuffer->RemoveElementsAt(mTarget->mPadding, mTarget->mSize);
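The pointer arithmetic in EnsureCapacity() rounds mBuffer->Elements() up to the next alignment boundary. A small standalone sketch, on the assumption (implied by the (addr + mask) & ~mask form and the % (RAW_DATA_ALIGNMENT + 1) assertion) that RAW_DATA_ALIGNMENT is a power-of-two-minus-one mask such as 15:

#include <stdint.h>

// Round aPtr up to the next (aMask + 1)-byte boundary, as EnsureCapacity does.
// With aMask = 15: 0x1003 -> (0x1003 + 15) & ~15 = 0x1010, a 16-byte boundary.
uint8_t*
AlignUp(uint8_t* aPtr, uintptr_t aMask)
{
  return reinterpret_cast<uint8_t*>(
    (reinterpret_cast<uintptr_t>(aPtr) + aMask) & ~aMask);
}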
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -290,123 +290,123 @@ void MediaDecoder::SetVolume(double aVol
 }
 
 void MediaDecoder::ConnectDecodedStreamToOutputStream(OutputStreamData* aStream)
 {
   NS_ASSERTION(!aStream->mPort, "Already connected?");
 
   // The output stream must stay in sync with the decoded stream, so if
   // either stream is blocked, we block the other.
-  aStream->mPort = aStream->mStream->AllocateInputPort(mDecodedStream->mStream,
+  aStream->mPort = aStream->mStream->AllocateInputPort(GetDecodedStream()->mStream,
       MediaInputPort::FLAG_BLOCK_INPUT | MediaInputPort::FLAG_BLOCK_OUTPUT);
   // Unblock the output stream now. While it's connected to mDecodedStream,
   // mDecodedStream is responsible for controlling blocking.
   aStream->mStream->ChangeExplicitBlockerCount(-1);
 }
 
 void MediaDecoder::UpdateDecodedStream()
 {
   MOZ_ASSERT(NS_IsMainThread());
   GetReentrantMonitor().AssertCurrentThreadIn();
 
-  if (mDecodedStream) {
+  if (GetDecodedStream()) {
     bool blockForPlayState = mPlayState != PLAY_STATE_PLAYING || mLogicallySeeking;
-    if (mDecodedStream->mHaveBlockedForPlayState != blockForPlayState) {
-      mDecodedStream->mStream->ChangeExplicitBlockerCount(blockForPlayState ? 1 : -1);
-      mDecodedStream->mHaveBlockedForPlayState = blockForPlayState;
+    if (GetDecodedStream()->mHaveBlockedForPlayState != blockForPlayState) {
+      GetDecodedStream()->mStream->ChangeExplicitBlockerCount(blockForPlayState ? 1 : -1);
+      GetDecodedStream()->mHaveBlockedForPlayState = blockForPlayState;
     }
   }
 }
 
 void MediaDecoder::DestroyDecodedStream()
 {
   MOZ_ASSERT(NS_IsMainThread());
   GetReentrantMonitor().AssertCurrentThreadIn();
 
   // Avoid redundantly blocking the output streams.
   if (!GetDecodedStream()) {
     return;
   }
 
   // All streams are having their SourceMediaStream disconnected, so they
   // need to be explicitly blocked again.
-  for (int32_t i = mOutputStreams.Length() - 1; i >= 0; --i) {
-    OutputStreamData& os = mOutputStreams[i];
+  auto& outputStreams = OutputStreams();
+  for (int32_t i = outputStreams.Length() - 1; i >= 0; --i) {
+    OutputStreamData& os = outputStreams[i];
     // Explicitly remove all existing ports.
     // This is not strictly necessary but it's good form.
     MOZ_ASSERT(os.mPort, "Double-delete of the ports!");
     os.mPort->Destroy();
     os.mPort = nullptr;
     // During cycle collection, nsDOMMediaStream can be destroyed and send
     // its Destroy message before this decoder is destroyed. So we have to
     // be careful not to send any messages after the Destroy().
     if (os.mStream->IsDestroyed()) {
       // Probably the DOM MediaStream was GCed. Clean up.
-      mOutputStreams.RemoveElementAt(i);
+      outputStreams.RemoveElementAt(i);
     } else {
       os.mStream->ChangeExplicitBlockerCount(1);
     }
   }
 
-  mDecodedStream = nullptr;
+  mDecodedStream.DestroyData();
 }
 
 void MediaDecoder::UpdateStreamBlockingForStateMachinePlaying()
 {
   GetReentrantMonitor().AssertCurrentThreadIn();
-  if (!mDecodedStream) {
+  if (!GetDecodedStream()) {
     return;
   }
   bool blockForStateMachineNotPlaying =
     mDecoderStateMachine && !mDecoderStateMachine->IsPlaying() &&
     mDecoderStateMachine->GetState() != MediaDecoderStateMachine::DECODER_STATE_COMPLETED;
-  if (blockForStateMachineNotPlaying != mDecodedStream->mHaveBlockedForStateMachineNotPlaying) {
-    mDecodedStream->mHaveBlockedForStateMachineNotPlaying = blockForStateMachineNotPlaying;
+  if (blockForStateMachineNotPlaying != GetDecodedStream()->mHaveBlockedForStateMachineNotPlaying) {
+    GetDecodedStream()->mHaveBlockedForStateMachineNotPlaying = blockForStateMachineNotPlaying;
     int32_t delta = blockForStateMachineNotPlaying ? 1 : -1;
     if (NS_IsMainThread()) {
-      mDecodedStream->mStream->ChangeExplicitBlockerCount(delta);
+      GetDecodedStream()->mStream->ChangeExplicitBlockerCount(delta);
     } else {
       nsCOMPtr<nsIRunnable> runnable =
-          NS_NewRunnableMethodWithArg<int32_t>(mDecodedStream->mStream.get(),
+          NS_NewRunnableMethodWithArg<int32_t>(GetDecodedStream()->mStream.get(),
               &MediaStream::ChangeExplicitBlockerCount, delta);
       NS_DispatchToMainThread(runnable);
     }
   }
 }
 
 void MediaDecoder::RecreateDecodedStream(int64_t aStartTimeUSecs,
                                          MediaStreamGraph* aGraph)
 {
   MOZ_ASSERT(NS_IsMainThread());
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
   DECODER_LOG("RecreateDecodedStream aStartTimeUSecs=%lld!", aStartTimeUSecs);
 
   if (!aGraph) {
-    aGraph = mDecodedStream->mStream->Graph();
+    aGraph = GetDecodedStream()->mStream->Graph();
   }
   DestroyDecodedStream();
 
-  mDecodedStream = new DecodedStreamData(this,
-                                         aStartTimeUSecs,
-                                         aGraph->CreateSourceStream(nullptr));
+  mDecodedStream.RecreateData(this, aStartTimeUSecs, aGraph->CreateSourceStream(nullptr));
 
   // Note that the delay between removing ports in DestroyDecodedStream
   // and adding new ones won't cause a glitch since all graph operations
   // between main-thread stable states take effect atomically.
-  for (int32_t i = mOutputStreams.Length() - 1; i >= 0; --i) {
-    OutputStreamData& os = mOutputStreams[i];
+  auto& outputStreams = OutputStreams();
+  for (int32_t i = outputStreams.Length() - 1; i >= 0; --i) {
+    OutputStreamData& os = outputStreams[i];
     MOZ_ASSERT(!os.mStream->IsDestroyed(),
         "Should've been removed in DestroyDecodedStream()");
     ConnectDecodedStreamToOutputStream(&os);
   }
   UpdateStreamBlockingForStateMachinePlaying();
 
-  mDecodedStream->mHaveBlockedForPlayState = mPlayState != PLAY_STATE_PLAYING;
-  if (mDecodedStream->mHaveBlockedForPlayState) {
-    mDecodedStream->mStream->ChangeExplicitBlockerCount(1);
+  GetDecodedStream()->mHaveBlockedForPlayState = mPlayState != PLAY_STATE_PLAYING;
+  if (GetDecodedStream()->mHaveBlockedForPlayState) {
+    GetDecodedStream()->mStream->ChangeExplicitBlockerCount(1);
   }
 }
 
 void MediaDecoder::AddOutputStream(ProcessedMediaStream* aStream,
                                    bool aFinishWhenEnded)
 {
   MOZ_ASSERT(NS_IsMainThread());
   DECODER_LOG("AddOutputStream aStream=%p!", aStream);
@@ -414,17 +414,17 @@ void MediaDecoder::AddOutputStream(Proce
   {
     ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
     if (mDecoderStateMachine) {
       mDecoderStateMachine->DispatchAudioCaptured();
     }
     if (!GetDecodedStream()) {
       RecreateDecodedStream(mLogicalPosition, aStream->Graph());
     }
-    OutputStreamData* os = mOutputStreams.AppendElement();
+    OutputStreamData* os = OutputStreams().AppendElement();
     os->Init(this, aStream);
     ConnectDecodedStreamToOutputStream(os);
     if (aFinishWhenEnded) {
       // Ensure that aStream finishes the moment mDecodedStream does.
       aStream->SetAutofinish(true);
     }
   }
 
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -415,25 +415,27 @@ public:
    */
   void RecreateDecodedStream(int64_t aStartTimeUSecs,
                              MediaStreamGraph* aGraph = nullptr);
   /**
    * Call this when mDecoderStateMachine or mDecoderStateMachine->IsPlaying() changes.
    * Decoder monitor must be held.
    */
   void UpdateStreamBlockingForStateMachinePlaying();
+
   nsTArray<OutputStreamData>& OutputStreams()
   {
     GetReentrantMonitor().AssertCurrentThreadIn();
-    return mOutputStreams;
+    return mDecodedStream.OutputStreams();
   }
+
   DecodedStreamData* GetDecodedStream()
   {
     GetReentrantMonitor().AssertCurrentThreadIn();
-    return mDecodedStream;
+    return mDecodedStream.GetData();
   }
 
   // Add an output stream. All decoder output will be sent to the stream.
   // The stream is initially blocked. The decoder is responsible for unblocking
   // it while it is playing back.
   virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
 
   // Return the duration of the video in seconds.
@@ -1025,24 +1027,22 @@ private:
   // change.  Explicitly private for force access via GetReentrantMonitor.
   ReentrantMonitor mReentrantMonitor;
 
 #ifdef MOZ_EME
   nsRefPtr<CDMProxy> mProxy;
 #endif
 
 protected:
-  // Data about MediaStreams that are being fed by this decoder.
-  nsTArray<OutputStreamData> mOutputStreams;
   // The SourceMediaStream we are using to feed the mOutputStreams. This stream
   // is never exposed outside the decoder.
   // Only written on the main thread while holding the monitor. Therefore it
   // can be read on any thread while holding the monitor, or on the main thread
   // without holding the monitor.
-  nsAutoPtr<DecodedStreamData> mDecodedStream;
+  DecodedStream mDecodedStream;
 
   // Set to one of the valid play states.
   // This can only be changed on the main thread while holding the decoder
   // monitor. Thus, it can be safely read while holding the decoder monitor
   // OR on the main thread.
   // Any change to the state on the main thread must call NotifyAll on the
   // monitor so the decode thread can wake up.
   Canonical<PlayState> mPlayState;
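
The new OutputStreams() and GetDecodedStream() accessors assert that the decoder's reentrant monitor is held before handing out state now owned by mDecodedStream. A small self-contained sketch of that assert-the-lock-in-the-accessor idea, built on std::recursive_mutex with a tracked owner thread; this only illustrates the pattern, it is not Gecko's ReentrantMonitor API, and every name below is made up:

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>
#include <vector>

// A tiny recursive monitor that can assert the calling thread currently owns
// it, analogous in spirit to ReentrantMonitor::AssertCurrentThreadIn().
class Monitor {
public:
  void Enter() {
    mMutex.lock();
    if (mDepth++ == 0) {
      mOwner.store(std::this_thread::get_id());
    }
  }
  void Exit() {
    if (--mDepth == 0) {
      mOwner.store(std::thread::id());
    }
    mMutex.unlock();
  }
  void AssertCurrentThreadIn() const {
    // Debug-only sanity check that the caller entered the monitor first.
    assert(mOwner.load() == std::this_thread::get_id());
  }
private:
  std::recursive_mutex mMutex;
  int mDepth = 0;
  std::atomic<std::thread::id> mOwner{std::thread::id()};
};

struct Decoder {
  // The accessor refuses (in debug builds) to hand out shared state unless
  // the monitor is held, mirroring OutputStreams()/GetDecodedStream() above.
  std::vector<int>& OutputStreams() {
    mMonitor.AssertCurrentThreadIn();
    return mOutputStreams;
  }
  Monitor mMonitor;
  std::vector<int> mOutputStreams;
};
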
--- a/dom/media/MediaTaskQueue.cpp
+++ b/dom/media/MediaTaskQueue.cpp
@@ -22,17 +22,16 @@ MediaTaskQueue::MediaTaskQueue(Temporary
 {
   MOZ_COUNT_CTOR(MediaTaskQueue);
 }
 
 MediaTaskQueue::~MediaTaskQueue()
 {
   MonitorAutoLock mon(mQueueMonitor);
   MOZ_ASSERT(mIsShutdown);
-  MOZ_DIAGNOSTIC_ASSERT(mTasks.empty());
   MOZ_COUNT_DTOR(MediaTaskQueue);
 }
 
 TaskDispatcher&
 MediaTaskQueue::TailDispatcher()
 {
   MOZ_ASSERT(IsCurrentThreadIn());
   MOZ_ASSERT(mTailDispatcher);
--- a/dom/media/fmp4/MP4Demuxer.cpp
+++ b/dom/media/fmp4/MP4Demuxer.cpp
@@ -30,17 +30,17 @@ MP4Demuxer::Init()
   AutoPinned<mp4_demuxer::ResourceStream> stream(mStream);
 
   // Check that we have enough data to read the metadata.
   MediaByteRange br = mp4_demuxer::MP4Metadata::MetadataRange(stream);
   if (br.IsNull()) {
     return InitPromise::CreateAndReject(DemuxerFailureReason::WAITING_FOR_DATA, __func__);
   }
 
-  if (!mInitData->SetLength(br.Length())) {
+  if (!mInitData->SetLength(br.Length(), fallible)) {
     // OOM
     return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__);
   }
 
   size_t size;
   mStream->ReadAt(br.mStart, mInitData->Elements(), br.Length(), &size);
   if (size != size_t(br.Length())) {
     return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__);
--- a/dom/media/mediasource/ContainerParser.cpp
+++ b/dom/media/mediasource/ContainerParser.cpp
@@ -170,17 +170,17 @@ public:
     }
 
     // XXX This is a bit of a hack.  Assume that if there are no timecodes
     // present and it's an init segment, then it's _just_ an init segment.
     // We should be more precise.
     if (initSegment || !HasCompleteInitData()) {
       if (mParser.mInitEndOffset > 0) {
         MOZ_ASSERT(mParser.mInitEndOffset <= mResource->GetLength());
-        if (!mInitData->SetLength(mParser.mInitEndOffset)) {
+        if (!mInitData->SetLength(mParser.mInitEndOffset, fallible)) {
           // Super unlikely OOM
           return false;
         }
         char* buffer = reinterpret_cast<char*>(mInitData->Elements());
         mResource->ReadFromCache(buffer, 0, mParser.mInitEndOffset);
         MSE_DEBUG(WebMContainerParser, "Stashed init of %u bytes.",
                   mParser.mInitEndOffset);
         mResource = nullptr;
@@ -301,17 +301,17 @@ public:
       MediaByteRange(mParser->mOffset, mResource->GetLength());
     byteRanges.AppendElement(mbr);
     mParser->RebuildFragmentedIndex(byteRanges);
 
     if (initSegment || !HasCompleteInitData()) {
       const MediaByteRange& range = mParser->mInitRange;
       uint32_t length = range.mEnd - range.mStart;
       if (length) {
-        if (!mInitData->SetLength(length)) {
+        if (!mInitData->SetLength(length, fallible)) {
           // Super unlikely OOM
           return false;
         }
         char* buffer = reinterpret_cast<char*>(mInitData->Elements());
         mResource->ReadFromCache(buffer, range.mStart, length);
         MSE_DEBUG(MP4ContainerParser ,"Stashed init of %u bytes.",
                   length);
       } else {
--- a/dom/media/omx/MediaOmxCommonDecoder.cpp
+++ b/dom/media/omx/MediaOmxCommonDecoder.cpp
@@ -42,17 +42,17 @@ MediaOmxCommonDecoder::SetPlatformCanOff
   ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
   mCanOffloadAudio = aCanOffloadAudio;
 }
 
 bool
 MediaOmxCommonDecoder::CheckDecoderCanOffloadAudio()
 {
   return (mCanOffloadAudio && !mFallbackToStateMachine &&
-          !mOutputStreams.Length() && mPlaybackRate == 1.0);
+          !OutputStreams().Length() && mPlaybackRate == 1.0);
 }
 
 void
 MediaOmxCommonDecoder::FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo,
                                         MediaDecoderEventVisibility aEventVisibility)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
--- a/dom/media/platforms/apple/AppleVDADecoder.cpp
+++ b/dom/media/platforms/apple/AppleVDADecoder.cpp
@@ -452,17 +452,17 @@ AppleVDADecoder::CreateDecoderSpecificat
                             &kCFTypeDictionaryKeyCallBacks,
                             &kCFTypeDictionaryValueCallBacks);
 }
 
 CFDictionaryRef
 AppleVDADecoder::CreateOutputConfiguration()
 {
   // Construct IOSurface Properties
-  const void* IOSurfaceKeys[] = { MacIOSurfaceLib::kPropIsGlobal };
+  const void* IOSurfaceKeys[] = { CFSTR("kIOSurfaceIsGlobal") };
   const void* IOSurfaceValues[] = { kCFBooleanTrue };
   static_assert(ArrayLength(IOSurfaceKeys) == ArrayLength(IOSurfaceValues),
                 "Non matching keys/values array size");
 
   // Construct output configuration.
   AutoCFRelease<CFDictionaryRef> IOSurfaceProperties =
     CFDictionaryCreate(kCFAllocatorDefault,
                        IOSurfaceKeys,
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -57,17 +57,17 @@ public:
     // will not go anywhere.
     *aOutput = aInput;
 
     // The output buffer is allocated lazily, on the rendering thread.
     if (!mBufferAllocated) {
       // These allocations might fail if content provides a huge number of
       // channels or size, but it's OK since we'll deal with the failure
       // gracefully.
-      if (mInputChannels.SetLength(mNumberOfChannels)) {
+      if (mInputChannels.SetLength(mNumberOfChannels, fallible)) {
         for (uint32_t i = 0; i < mNumberOfChannels; ++i) {
           mInputChannels[i] = new (fallible) float[mLength];
           if (!mInputChannels[i]) {
             mInputChannels.Clear();
             break;
           }
         }
       }
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -105,17 +105,17 @@ public:
   }
 
   virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
 private:
-  AutoFallibleTArray<Storage,2> mContents;
+  nsAutoTArray<Storage, 2> mContents;
 };
 
 /**
  * Allocates an AudioChunk with fresh buffers of WEBAUDIO_BLOCK_SIZE float samples.
  * AudioChunk::mChannelData's entries can be cast to float* for writing.
  */
 void AllocateAudioBlock(uint32_t aChannelCount, AudioChunk* aChunk);
 
--- a/dom/media/webaudio/DelayBuffer.cpp
+++ b/dom/media/webaudio/DelayBuffer.cpp
@@ -191,17 +191,17 @@ DelayBuffer::EnsureBuffer()
 {
   if (mChunks.Length() == 0) {
     // The length of the buffer is at least one block greater than the maximum
     // delay so that writing an input block does not overwrite the block that
     // would subsequently be read at maximum delay.  Also round up to the next
     // block size, so that no block of writes will need to wrap.
     const int chunkCount = (mMaxDelayTicks + 2 * WEBAUDIO_BLOCK_SIZE - 1) >>
                                          WEBAUDIO_BLOCK_SIZE_BITS;
-    if (!mChunks.SetLength(chunkCount)) {
+    if (!mChunks.SetLength(chunkCount, fallible)) {
       return false;
     }
 
     mLastReadChunk = -1;
   }
   return true;
 }
 
--- a/dom/media/webaudio/MediaBufferDecoder.cpp
+++ b/dom/media/webaudio/MediaBufferDecoder.cpp
@@ -347,17 +347,17 @@ MediaDecodeTask::FinishDecode()
     speex_resampler_skip_zeros(resampler);
     resampledFrames += speex_resampler_get_output_latency(resampler);
   }
 
   // Allocate the channel buffers.  Note that if we end up resampling, we may
   // write fewer bytes than mResampledFrames to the output buffer, in which
   // case mWriteIndex will tell us how many valid samples we have.
   bool memoryAllocationSuccess = true;
-  if (!mDecodeJob.mChannelBuffers.SetLength(channelCount)) {
+  if (!mDecodeJob.mChannelBuffers.SetLength(channelCount, fallible)) {
     memoryAllocationSuccess = false;
   } else {
     for (uint32_t i = 0; i < channelCount; ++i) {
       mDecodeJob.mChannelBuffers[i] = new (fallible) float[resampledFrames];
       if (!mDecodeJob.mChannelBuffers[i]) {
         memoryAllocationSuccess = false;
         break;
       }
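
This hunk, like the AudioDestinationNode one earlier, converts the channel-buffer setup to fallible allocation: the array of channel pointers and each per-channel buffer are allocated with calls that can report failure, and a single success flag drives graceful degradation instead of aborting on OOM. A standalone sketch of that shape using new (std::nothrow) in place of Gecko's fallible allocator (the names here are illustrative only):

#include <cstddef>
#include <memory>
#include <new>
#include <vector>

// Allocate one buffer per channel, treating every allocation as fallible.
// On any failure, release what was already allocated and report false so the
// caller can degrade gracefully instead of crashing on OOM.
bool AllocateChannelBuffers(std::vector<std::unique_ptr<float[]>>& channels,
                            size_t channelCount, size_t frames)
{
  try {
    channels.resize(channelCount);       // may throw std::bad_alloc
  } catch (const std::bad_alloc&) {
    return false;
  }

  for (size_t i = 0; i < channelCount; ++i) {
    channels[i].reset(new (std::nothrow) float[frames]);
    if (!channels[i]) {
      channels.clear();                  // drop partially allocated buffers
      return false;
    }
  }
  return true;
}
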
--- a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp
@@ -75,17 +75,17 @@ MediaStreamAudioSourceNode::PrincipalCha
 {
   bool subsumes = false;
   nsPIDOMWindow* parent = Context()->GetParentObject();
   if (parent) {
     nsIDocument* doc = parent->GetExtantDoc();
     if (doc) {
       nsIPrincipal* docPrincipal = doc->NodePrincipal();
       nsIPrincipal* streamPrincipal = mInputStream->GetPrincipal();
-      if (NS_FAILED(docPrincipal->Subsumes(streamPrincipal, &subsumes))) {
+      if (!streamPrincipal || NS_FAILED(docPrincipal->Subsumes(streamPrincipal, &subsumes))) {
         subsumes = false;
       }
     }
   }
   auto stream = static_cast<AudioNodeExternalInputStream*>(mStream.get());
   stream->SetInt32Parameter(MediaStreamAudioSourceNodeEngine::ENABLE,
                             subsumes || aDOMMediaStream->GetCORSMode() != CORS_NONE);
 }
--- a/dom/mobilemessage/MobileMessageManager.cpp
+++ b/dom/mobilemessage/MobileMessageManager.cpp
@@ -351,17 +351,17 @@ MobileMessageManager::Delete(nsIDOMMozMm
 }
 
 already_AddRefed<DOMRequest>
 MobileMessageManager::Delete(const Sequence<OwningLongOrMozSmsMessageOrMozMmsMessage>& aParams,
                              ErrorResult& aRv)
 {
   const uint32_t size = aParams.Length();
   FallibleTArray<int32_t> idArray;
-  if (!idArray.SetLength(size)) {
+  if (!idArray.SetLength(size, fallible)) {
     aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
     return nullptr;
   }
 
   DebugOnly<nsresult> rv;
   for (uint32_t i = 0; i < size; i++) {
     const OwningLongOrMozSmsMessageOrMozMmsMessage& element = aParams[i];
     int32_t &id = idArray[i];
--- a/dom/mobilemessage/ipc/SmsChild.cpp
+++ b/dom/mobilemessage/ipc/SmsChild.cpp
@@ -343,20 +343,20 @@ MobileMessageCursorChild::HandleContinue
 
 void
 MobileMessageCursorChild::DoNotifyResult(const nsTArray<MobileMessageData>& aDataArray)
 {
   const uint32_t length = aDataArray.Length();
   MOZ_ASSERT(length);
 
   AutoFallibleTArray<nsISupports*, 1> autoArray;
-  NS_ENSURE_TRUE_VOID(autoArray.SetCapacity(length));
+  NS_ENSURE_TRUE_VOID(autoArray.SetCapacity(length, fallible));
 
   AutoFallibleTArray<nsCOMPtr<nsISupports>, 1> messages;
-  NS_ENSURE_TRUE_VOID(messages.SetCapacity(length));
+  NS_ENSURE_TRUE_VOID(messages.SetCapacity(length, fallible));
 
   for (uint32_t i = 0; i < length; i++) {
     nsCOMPtr<nsISupports> message = CreateMessageFromMessageData(aDataArray[i]);
     NS_ENSURE_TRUE_VOID(messages.AppendElement(message));
     NS_ENSURE_TRUE_VOID(autoArray.AppendElement(message.get()));
   }
 
   mCursorCallback->NotifyCursorResult(autoArray.Elements(), length);
@@ -364,20 +364,20 @@ MobileMessageCursorChild::DoNotifyResult
 
 void
 MobileMessageCursorChild::DoNotifyResult(const nsTArray<ThreadData>& aDataArray)
 {
   const uint32_t length = aDataArray.Length();
   MOZ_ASSERT(length);
 
   AutoFallibleTArray<nsISupports*, 1> autoArray;
-  NS_ENSURE_TRUE_VOID(autoArray.SetCapacity(length));
+  NS_ENSURE_TRUE_VOID(autoArray.SetCapacity(length, fallible));
 
   AutoFallibleTArray<nsCOMPtr<nsISupports>, 1> threads;
-  NS_ENSURE_TRUE_VOID(threads.SetCapacity(length));
+  NS_ENSURE_TRUE_VOID(threads.SetCapacity(length, fallible));
 
   for (uint32_t i = 0; i < length; i++) {
     nsCOMPtr<nsISupports> thread = new MobileMessageThread(aDataArray[i]);
     NS_ENSURE_TRUE_VOID(threads.AppendElement(thread));
     NS_ENSURE_TRUE_VOID(autoArray.AppendElement(thread.get()));
   }
 
   mCursorCallback->NotifyCursorResult(autoArray.Elements(), length);
--- a/dom/plugins/base/nsJSNPRuntime.cpp
+++ b/dom/plugins/base/nsJSNPRuntime.cpp
@@ -78,17 +78,17 @@ typedef js::HashMap<nsJSObjWrapperKey,
                     js::SystemAllocPolicy> JSObjWrapperTable;
 static JSObjWrapperTable sJSObjWrappers;
 
 // Whether it's safe to iterate sJSObjWrappers.  Set to true when sJSObjWrappers
 // has been initialized and is not currently being enumerated.
 static bool sJSObjWrappersAccessible = false;
 
 // Hash of NPObject wrappers that wrap NPObjects as JSObjects.
-static PLDHashTable sNPObjWrappers;
+static PLDHashTable2* sNPObjWrappers;
 
 // Global wrapper count. This includes JSObject wrappers *and*
 // NPObject wrappers. When this count goes to zero, there are no more
 // wrappers and we can kill off hash tables etc.
 static int32_t sWrapperCount;
 
 // The runtime service used to register/unregister GC callbacks.
 nsCOMPtr<nsIJSRuntimeService> sCallbackRuntime;
@@ -396,33 +396,34 @@ DestroyJSObjWrapperTable()
   // hash to prevent leaking it.
   sJSObjWrappers.finish();
   sJSObjWrappersAccessible = false;
 }
 
 static bool
 CreateNPObjWrapperTable()
 {
-  MOZ_ASSERT(!sNPObjWrappers.IsInitialized());
+  MOZ_ASSERT(!sNPObjWrappers);
 
   if (!RegisterGCCallbacks()) {
     return false;
   }
 
-  PL_DHashTableInit(&sNPObjWrappers, PL_DHashGetStubOps(),
-                    sizeof(NPObjWrapperHashEntry));
+  sNPObjWrappers =
+    new PLDHashTable2(PL_DHashGetStubOps(), sizeof(NPObjWrapperHashEntry));
   return true;
 }
 
 static void
 DestroyNPObjWrapperTable()
 {
-  MOZ_ASSERT(sNPObjWrappers.EntryCount() == 0);
-
-  PL_DHashTableFinish(&sNPObjWrappers);
+  MOZ_ASSERT(sNPObjWrappers->EntryCount() == 0);
+
+  delete sNPObjWrappers;
+  sNPObjWrappers = nullptr;
 }
 
 static void
 OnWrapperCreated()
 {
   ++sWrapperCount;
 }
 
@@ -431,17 +432,17 @@ OnWrapperDestroyed()
 {
   NS_ASSERTION(sWrapperCount, "Whaaa, unbalanced created/destroyed calls!");
 
   if (--sWrapperCount == 0) {
     if (sJSObjWrappersAccessible) {
       DestroyJSObjWrapperTable();
     }
 
-    if (sNPObjWrappers.IsInitialized()) {
+    if (sNPObjWrappers) {
       // No more wrappers, and our hash was initialized. Finish the
       // hash to prevent leaking it.
       DestroyNPObjWrapperTable();
     }
 
     UnregisterGCCallbacks();
   }
 }
@@ -1756,46 +1757,46 @@ NPObjWrapper_Convert(JSContext *cx, JS::
   return false;
 }
 
 static void
 NPObjWrapper_Finalize(js::FreeOp *fop, JSObject *obj)
 {
   NPObject *npobj = (NPObject *)::JS_GetPrivate(obj);
   if (npobj) {
-    if (sNPObjWrappers.IsInitialized()) {
-      PL_DHashTableRemove(&sNPObjWrappers, npobj);
+    if (sNPObjWrappers) {
+      PL_DHashTableRemove(sNPObjWrappers, npobj);
     }
   }
 
   if (!sDelayedReleases)
     sDelayedReleases = new nsTArray<NPObject*>;
   sDelayedReleases->AppendElement(npobj);
 }
 
 static void
 NPObjWrapper_ObjectMoved(JSObject *obj, const JSObject *old)
 {
   // The wrapper JSObject has been moved, so we need to update the entry in the
   // sNPObjWrappers hash table, if present.
 
-  if (!sNPObjWrappers.IsInitialized()) {
+  if (!sNPObjWrappers) {
     return;
   }
 
   NPObject *npobj = (NPObject *)::JS_GetPrivate(obj);
   if (!npobj) {
     return;
   }
 
   // Calling PL_DHashTableSearch() will not result in GC.
   JS::AutoSuppressGCAnalysis nogc;
 
   NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
-    (PL_DHashTableSearch(&sNPObjWrappers, npobj));
+    (PL_DHashTableSearch(sNPObjWrappers, npobj));
   MOZ_ASSERT(entry && entry->mJSObj);
   MOZ_ASSERT(entry->mJSObj == old);
   entry->mJSObj = obj;
 }
 
 static bool
 NPObjWrapper_Call(JSContext *cx, unsigned argc, JS::Value *vp)
 {
@@ -1831,33 +1832,33 @@ nsNPObjWrapper::OnDestroy(NPObject *npob
   }
 
   if (npobj->_class == &nsJSObjWrapper::sJSObjWrapperNPClass) {
     // npobj is one of our own, no private data to clean up here.
 
     return;
   }
 
-  if (!sNPObjWrappers.IsInitialized()) {
+  if (!sNPObjWrappers) {
     // No hash yet (or any more), no used wrappers available.
 
     return;
   }
 
   NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
-    (PL_DHashTableSearch(&sNPObjWrappers, npobj));
+    (PL_DHashTableSearch(sNPObjWrappers, npobj));
 
   if (entry && entry->mJSObj) {
     // Found a live NPObject wrapper, null out its JSObjects' private
     // data.
 
     ::JS_SetPrivate(entry->mJSObj, nullptr);
 
     // Remove the npobj from the hash now that it went away.
-    PL_DHashTableRawRemove(&sNPObjWrappers, entry);
+    PL_DHashTableRawRemove(sNPObjWrappers, entry);
 
     // The finalize hook will call OnWrapperDestroyed().
   }
 }
 
 // Look up or create a JSObject that wraps the NPObject npobj.
 
 // static
@@ -1881,25 +1882,25 @@ nsNPObjWrapper::GetNewOrUsed(NPP npp, JS
   }
 
   if (!npp) {
     NS_ERROR("No npp passed to nsNPObjWrapper::GetNewOrUsed()!");
 
     return nullptr;
   }
 
-  if (!sNPObjWrappers.IsInitialized()) {
+  if (!sNPObjWrappers) {
     // No hash yet (or any more), initialize it.
     if (!CreateNPObjWrapperTable()) {
       return nullptr;
     }
   }
 
   NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
-    (PL_DHashTableAdd(&sNPObjWrappers, npobj, fallible));
+    (PL_DHashTableAdd(sNPObjWrappers, npobj, fallible));
 
   if (!entry) {
     // Out of memory
     JS_ReportOutOfMemory(cx);
 
     return nullptr;
   }
 
@@ -1911,34 +1912,34 @@ nsNPObjWrapper::GetNewOrUsed(NPP npp, JS
       return nullptr;
     }
     return obj;
   }
 
   entry->mNPObj = npobj;
   entry->mNpp = npp;
 
-  uint32_t generation = sNPObjWrappers.Generation();
+  uint32_t generation = sNPObjWrappers->Generation();
 
   // No existing JSObject, create one.
 
   JS::Rooted<JSObject*> obj(cx, ::JS_NewObject(cx, js::Jsvalify(&sNPObjectJSWrapperClass)));
 
-  if (generation != sNPObjWrappers.Generation()) {
+  if (generation != sNPObjWrappers->Generation()) {
       // Reload entry if the JS_NewObject call caused a GC and reallocated
       // the table (see bug 445229). This is guaranteed to succeed.
 
-      NS_ASSERTION(PL_DHashTableSearch(&sNPObjWrappers, npobj),
+      NS_ASSERTION(PL_DHashTableSearch(sNPObjWrappers, npobj),
                    "Hashtable didn't find what we just added?");
   }
 
   if (!obj) {
     // OOM? Remove the stale entry from the hash.
 
-    PL_DHashTableRawRemove(&sNPObjWrappers, entry);
+    PL_DHashTableRawRemove(sNPObjWrappers, entry);
 
     return nullptr;
   }
 
   OnWrapperCreated();
 
   entry->mJSObj = obj;
 
@@ -1962,20 +1963,20 @@ struct NppAndCx
 static PLDHashOperator
 NPObjWrapperPluginDestroyedCallback(PLDHashTable *table, PLDHashEntryHdr *hdr,
                                     uint32_t number, void *arg)
 {
   NPObjWrapperHashEntry *entry = (NPObjWrapperHashEntry *)hdr;
   NppAndCx *nppcx = reinterpret_cast<NppAndCx *>(arg);
 
   if (entry->mNpp == nppcx->npp) {
-    // Prevent invalidate() and deallocate() from touching the hash
-    // we're enumerating.
-    const PLDHashTableOps *ops = table->Ops();
-    table->SetOps(nullptr);
+    // HACK: temporarily hide the hash we're enumerating so that invalidate()
+    // and deallocate() don't touch it.
+    PLDHashTable2 *tmp = static_cast<PLDHashTable2*>(table);
+    sNPObjWrappers = nullptr;
 
     NPObject *npobj = entry->mNPObj;
 
     if (npobj->_class && npobj->_class->invalidate) {
       npobj->_class->invalidate(npobj);
     }
 
 #ifdef NS_BUILD_REFCNT_LOGGING
@@ -1993,17 +1994,17 @@ NPObjWrapperPluginDestroyedCallback(PLDH
     if (npobj->_class && npobj->_class->deallocate) {
       npobj->_class->deallocate(npobj);
     } else {
       PR_Free(npobj);
     }
 
     ::JS_SetPrivate(entry->mJSObj, nullptr);
 
-    table->SetOps(ops);
+    sNPObjWrappers = tmp;
 
     if (sDelayedReleases && sDelayedReleases->RemoveElement(npobj)) {
       OnWrapperDestroyed();
     }
 
     return PL_DHASH_REMOVE;
   }
 
@@ -2034,19 +2035,19 @@ nsJSNPRuntime::OnPluginDestroy(NPP npp)
     }
 
     sJSObjWrappersAccessible = true;
   }
 
   // Use the safe JSContext here as we're not always able to find the
   // JSContext associated with the NPP any more.
   AutoSafeJSContext cx;
-  if (sNPObjWrappers.IsInitialized()) {
+  if (sNPObjWrappers) {
     NppAndCx nppcx = { npp, cx };
-    PL_DHashTableEnumerate(&sNPObjWrappers,
+    PL_DHashTableEnumerate(sNPObjWrappers,
                            NPObjWrapperPluginDestroyedCallback, &nppcx);
   }
 }
 
 // static
 void
 nsJSNPRuntime::OnPluginDestroyPending(NPP npp)
 {
@@ -2069,17 +2070,17 @@ static NPP
 LookupNPP(NPObject *npobj)
 {
   if (npobj->_class == &nsJSObjWrapper::sJSObjWrapperNPClass) {
     nsJSObjWrapper* o = static_cast<nsJSObjWrapper*>(npobj);
     return o->mNpp;
   }
 
   NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
-    (PL_DHashTableAdd(&sNPObjWrappers, npobj, fallible));
+    (PL_DHashTableAdd(sNPObjWrappers, npobj, fallible));
 
   if (!entry) {
     return nullptr;
   }
 
   NS_ASSERTION(entry->mNpp, "Live NPObject entry w/o an NPP!");
 
   return entry->mNpp;
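
In nsNPObjWrapper::GetNewOrUsed() above, the table's Generation() is snapshotted before JS_NewObject because that call can trigger a GC that grows the wrapper table and moves its entries; if the generation changed, the entry is looked up again before use (the bug 445229 comment). A self-contained sketch of that snapshot-and-relookup pattern over a toy flat map whose entries move when its storage grows (purely illustrative; PLDHashTable's real interface differs):

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// A toy flat map: entries live in a contiguous vector, so any insertion that
// grows the storage can relocate them, invalidating previously returned
// pointers. The generation counter advances whenever that can have happened.
struct FlatMap {
  std::vector<std::pair<std::string, int>> entries;
  uint32_t generation = 0;

  int* Add(const std::string& key, int value) {
    for (auto& e : entries) {
      if (e.first == key) { return &e.second; }
    }
    if (entries.size() == entries.capacity()) {
      ++generation;                       // growth will relocate the entries
    }
    entries.emplace_back(key, value);
    return &entries.back().second;
  }

  int* Search(const std::string& key) {
    for (auto& e : entries) {
      if (e.first == key) { return &e.second; }
    }
    return nullptr;
  }
};

// Mirror of the GetNewOrUsed() sequence: add an entry, remember the
// generation, run a step that may grow the table, and reload the entry if the
// generation moved before dereferencing the cached pointer.
int* AddAndUseEntry(FlatMap& map, const std::string& key) {
  int* entry = map.Add(key, 1);
  uint32_t generation = map.generation;

  map.Add("allocated-during-gc", 2);      // stands in for JS_NewObject's GC

  if (generation != map.generation) {
    entry = map.Search(key);              // entry is still present, just moved
  }
  return entry;
}
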
--- a/dom/plugins/ipc/PluginScriptableObjectChild.cpp
+++ b/dom/plugins/ipc/PluginScriptableObjectChild.cpp
@@ -773,17 +773,17 @@ PluginScriptableObjectChild::AnswerInvok
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   AutoFallibleTArray<NPVariant, 10> convertedArgs;
   uint32_t argCount = aArgs.Length();
 
-  if (!convertedArgs.SetLength(argCount)) {
+  if (!convertedArgs.SetLength(argCount, mozilla::fallible)) {
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   for (uint32_t index = 0; index < argCount; index++) {
     ConvertToVariant(aArgs[index], convertedArgs[index]);
   }
@@ -843,17 +843,17 @@ PluginScriptableObjectChild::AnswerInvok
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   AutoFallibleTArray<NPVariant, 10> convertedArgs;
   uint32_t argCount = aArgs.Length();
 
-  if (!convertedArgs.SetLength(argCount)) {
+  if (!convertedArgs.SetLength(argCount, mozilla::fallible)) {
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   for (uint32_t index = 0; index < argCount; index++) {
     ConvertToVariant(aArgs[index], convertedArgs[index]);
   }
@@ -1094,17 +1094,17 @@ PluginScriptableObjectChild::AnswerConst
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   AutoFallibleTArray<NPVariant, 10> convertedArgs;
   uint32_t argCount = aArgs.Length();
 
-  if (!convertedArgs.SetLength(argCount)) {
+  if (!convertedArgs.SetLength(argCount, mozilla::fallible)) {
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   for (uint32_t index = 0; index < argCount; index++) {
     ConvertToVariant(aArgs[index], convertedArgs[index]);
   }
--- a/dom/plugins/ipc/PluginScriptableObjectParent.cpp
+++ b/dom/plugins/ipc/PluginScriptableObjectParent.cpp
@@ -819,17 +819,17 @@ PluginScriptableObjectParent::AnswerInvo
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   AutoFallibleTArray<NPVariant, 10> convertedArgs;
   uint32_t argCount = aArgs.Length();
 
-  if (!convertedArgs.SetLength(argCount)) {
+  if (!convertedArgs.SetLength(argCount, fallible)) {
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   for (uint32_t index = 0; index < argCount; index++) {
     if (!ConvertToVariant(aArgs[index], convertedArgs[index], instance)) {
       // Don't leak things we've already converted!
@@ -902,17 +902,17 @@ PluginScriptableObjectParent::AnswerInvo
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   AutoFallibleTArray<NPVariant, 10> convertedArgs;
   uint32_t argCount = aArgs.Length();
 
-  if (!convertedArgs.SetLength(argCount)) {
+  if (!convertedArgs.SetLength(argCount, fallible)) {
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   for (uint32_t index = 0; index < argCount; index++) {
     if (!ConvertToVariant(aArgs[index], convertedArgs[index], instance)) {
       // Don't leak things we've already converted!
@@ -1222,17 +1222,17 @@ PluginScriptableObjectParent::AnswerCons
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   AutoFallibleTArray<NPVariant, 10> convertedArgs;
   uint32_t argCount = aArgs.Length();
 
-  if (!convertedArgs.SetLength(argCount)) {
+  if (!convertedArgs.SetLength(argCount, fallible)) {
     *aResult = void_t();
     *aSuccess = false;
     return true;
   }
 
   for (uint32_t index = 0; index < argCount; index++) {
     if (!ConvertToVariant(aArgs[index], convertedArgs[index], instance)) {
       // Don't leak things we've already converted!
--- a/dom/quota/QuotaManager.cpp
+++ b/dom/quota/QuotaManager.cpp
@@ -2010,16 +2010,45 @@ QuotaManager::InitializeRepository(Persi
   }
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
   }
 
   return NS_OK;
 }
 
+namespace {
+
+// The Cache API was creating top level morgue directories by accident for
+// a short time in nightly.  This unfortunately prevents all storage from
+// working.  So recover these profiles by removing these corrupt directories.
+// This should be removed at some point in the future.
+bool
+MaybeRemoveCorruptDirectory(const nsAString& aLeafName, nsIFile* aDir)
+{
+#ifdef NIGHTLY_BUILD
+  MOZ_ASSERT(aDir);
+
+  if (aLeafName != NS_LITERAL_STRING("morgue")) {
+    return false;
+  }
+
+  NS_WARNING("QuotaManager removing corrupt morgue directory.");
+
+  nsresult rv = aDir->Remove(true /* recursive */);
+  NS_ENSURE_SUCCESS(rv, false);
+
+  return true;
+#else
+  return false;
+#endif // NIGHTLY_BUILD
+}
+
+} // anonymous namespace
+
 nsresult
 QuotaManager::InitializeOrigin(PersistenceType aPersistenceType,
                                const nsACString& aGroup,
                                const nsACString& aOrigin,
                                bool aIsApp,
                                int64_t aAccessTime,
                                nsIFile* aDirectory)
 {
@@ -2067,16 +2096,20 @@ QuotaManager::InitializeOrigin(Persisten
     rv = file->IsDirectory(&isDirectory);
     NS_ENSURE_SUCCESS(rv, rv);
 
     if (!isDirectory) {
       NS_WARNING("Unknown file found!");
       return NS_ERROR_UNEXPECTED;
     }
 
+    if (MaybeRemoveCorruptDirectory(leafName, file)) {
+      continue;
+    }
+
     Client::Type clientType;
     rv = Client::TypeFromText(leafName, clientType);
     if (NS_FAILED(rv)) {
       NS_WARNING("Unknown directory found!");
       return NS_ERROR_UNEXPECTED;
     }
 
     rv = mClients[clientType]->InitOrigin(aPersistenceType, aGroup, aOrigin,
@@ -4168,16 +4201,20 @@ AsyncUsageRunnable::AddToUsage(QuotaMana
         NS_ENSURE_SUCCESS(rv, rv);
 
         if (!isDirectory) {
           NS_WARNING("Unknown file found!");
           return NS_ERROR_UNEXPECTED;
         }
       }
 
+      if (MaybeRemoveCorruptDirectory(leafName, file)) {
+        continue;
+      }
+
       Client::Type clientType;
       rv = Client::TypeFromText(leafName, clientType);
       if (NS_FAILED(rv)) {
         NS_WARNING("Unknown directory found!");
         if (!initialized) {
           return NS_ERROR_UNEXPECTED;
         }
         continue;
--- a/dom/smil/nsSMILAnimationFunction.cpp
+++ b/dom/smil/nsSMILAnimationFunction.cpp
@@ -775,17 +775,17 @@ nsSMILAnimationFunction::GetValues(const
                          preventCachingOfSandwich);
     parseOk &= ParseAttr(nsGkAtoms::by,   aSMILAttr, by,
                          preventCachingOfSandwich);
 
     if (preventCachingOfSandwich) {
       mValueNeedsReparsingEverySample = true;
     }
 
-    if (!parseOk || !result.SetCapacity(2)) {
+    if (!parseOk || !result.SetCapacity(2, mozilla::fallible)) {
       return NS_ERROR_FAILURE;
     }
 
     if (!to.IsNull()) {
       if (!from.IsNull()) {
         result.AppendElement(from);
         result.AppendElement(to);
       } else {
--- a/dom/svg/DOMSVGLengthList.cpp
+++ b/dom/svg/DOMSVGLengthList.cpp
@@ -126,17 +126,17 @@ DOMSVGLengthList::InternalListLengthWill
 
   // If our length will decrease, notify the items that will be removed:
   for (uint32_t i = aNewLength; i < oldLength; ++i) {
     if (mItems[i]) {
       mItems[i]->RemovingFromList();
     }
   }
 
-  if (!mItems.SetLength(aNewLength)) {
+  if (!mItems.SetLength(aNewLength, fallible)) {
     // We silently ignore SetLength OOM failure since being out of sync is safe
     // so long as we have *fewer* items than our internal list.
     mItems.Clear();
     return;
   }
 
   // If our length has increased, null out the new pointers:
   for (uint32_t i = oldLength; i < aNewLength; ++i) {
@@ -250,17 +250,17 @@ DOMSVGLengthList::InsertItemBefore(DOMSV
     error.Throw(NS_ERROR_DOM_SVG_WRONG_TYPE_ERR);
     return nullptr;
   }
   if (domItem->HasOwner() || domItem->IsReflectingAttribute()) {
     domItem = domItem->Copy(); // must do this before changing anything!
   }
 
   // Ensure we have enough memory so we can avoid complex error handling below:
-  if (!mItems.SetCapacity(mItems.Length() + 1) ||
+  if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
       !InternalList().SetCapacity(InternalList().Length() + 1)) {
     error.Throw(NS_ERROR_OUT_OF_MEMORY);
     return nullptr;
   }
 
   AutoChangeLengthListNotifier notifier(this);
   // Now that we know we're inserting, keep animVal list in sync as necessary.
   MaybeInsertNullInAnimValListAt(index);
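
InsertItemBefore above reserves capacity in both the DOM item array and the internal list before mutating either, so the later insertions cannot fail and leave the two lists out of sync; this patch only makes those reservations explicitly fallible. A minimal standalone illustration of that reserve-everything-first shape with two std::vectors (names are illustrative):

#include <new>
#include <vector>

// Insert a value into two parallel vectors, either completely or not at all.
// Reserving first means the push_backs below cannot reallocate, so a failure
// can only happen before anything has been modified.
bool InsertInBoth(std::vector<int>& wrappers, std::vector<int>& internal,
                  int value)
{
  try {
    wrappers.reserve(wrappers.size() + 1);
    internal.reserve(internal.size() + 1);
  } catch (const std::bad_alloc&) {
    return false;                // neither vector has been touched yet
  }

  // Guaranteed not to allocate: both vectors already have the capacity.
  wrappers.push_back(value);
  internal.push_back(value);
  return true;
}
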
--- a/dom/svg/DOMSVGNumberList.cpp
+++ b/dom/svg/DOMSVGNumberList.cpp
@@ -127,17 +127,17 @@ DOMSVGNumberList::InternalListLengthWill
 
   // If our length will decrease, notify the items that will be removed:
   for (uint32_t i = aNewLength; i < oldLength; ++i) {
     if (mItems[i]) {
       mItems[i]->RemovingFromList();
     }
   }
 
-  if (!mItems.SetLength(aNewLength)) {
+  if (!mItems.SetLength(aNewLength, fallible)) {
     // We silently ignore SetLength OOM failure since being out of sync is safe
     // so long as we have *fewer* items than our internal list.
     mItems.Clear();
     return;
   }
 
   // If our length has increased, null out the new pointers:
   for (uint32_t i = oldLength; i < aNewLength; ++i) {
@@ -234,17 +234,17 @@ DOMSVGNumberList::InsertItemBefore(DOMSV
     error.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
     return nullptr;
   }
 
   // must do this before changing anything!
   nsRefPtr<DOMSVGNumber> domItem = aItem.HasOwner() ? aItem.Clone() : &aItem;
 
   // Ensure we have enough memory so we can avoid complex error handling below:
-  if (!mItems.SetCapacity(mItems.Length() + 1) ||
+  if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
       !InternalList().SetCapacity(InternalList().Length() + 1)) {
     error.Throw(NS_ERROR_OUT_OF_MEMORY);
     return nullptr;
   }
 
   AutoChangeNumberListNotifier notifier(this);
   // Now that we know we're inserting, keep animVal list in sync as necessary.
   MaybeInsertNullInAnimValListAt(index);
--- a/dom/svg/DOMSVGPathSegList.cpp
+++ b/dom/svg/DOMSVGPathSegList.cpp
@@ -365,18 +365,19 @@ DOMSVGPathSegList::InsertItemBefore(DOMS
   nsRefPtr<DOMSVGPathSeg> domItem = &aNewItem;
   if (domItem->HasOwner()) {
     domItem = domItem->Clone(); // must do this before changing anything!
   }
 
   uint32_t argCount = SVGPathSegUtils::ArgCountForType(domItem->Type());
 
   // Ensure we have enough memory so we can avoid complex error handling below:
-  if (!mItems.SetCapacity(mItems.Length() + 1) ||
-      !InternalList().mData.SetCapacity(InternalList().mData.Length() + 1 + argCount)) {
+  if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
+      !InternalList().mData.SetCapacity(InternalList().mData.Length() + 1 + argCount,
+                                        fallible)) {
     aError.Throw(NS_ERROR_OUT_OF_MEMORY);
     return nullptr;
   }
 
   AutoChangePathSegListNotifier notifier(this);
   // Now that we know we're inserting, keep animVal list in sync as necessary.
   MaybeInsertNullInAnimValListAt(aIndex, internalIndex, argCount);
 
@@ -432,20 +433,19 @@ DOMSVGPathSegList::ReplaceItem(DOMSVGPat
   // intentionally putting it in a signed variable, because we're going to
   // subtract these values and might produce something negative.
   int32_t oldArgCount = SVGPathSegUtils::ArgCountForType(oldType);
   int32_t newArgCount = SVGPathSegUtils::ArgCountForType(domItem->Type());
 
   float segAsRaw[1 + NS_SVG_PATH_SEG_MAX_ARGS];
   domItem->ToSVGPathSegEncodedData(segAsRaw);
 
-  bool ok = !!InternalList().mData.ReplaceElementsAt(
-                  internalIndex, 1 + oldArgCount,
-                  segAsRaw, 1 + newArgCount);
-  if (!ok) {
+  if (!InternalList().mData.ReplaceElementsAt(internalIndex, 1 + oldArgCount,
+                                              segAsRaw, 1 + newArgCount,
+                                              fallible)) {
     aError.Throw(NS_ERROR_OUT_OF_MEMORY);
     return nullptr;
   }
   ItemAt(aIndex) = domItem;
 
   // This MUST come after the ToSVGPathSegEncodedData call, otherwise that call
   // would end up reading bad data from InternalList()!
   domItem->InsertingIntoList(this, aIndex, IsAnimValList());
--- a/dom/svg/DOMSVGPointList.cpp
+++ b/dom/svg/DOMSVGPointList.cpp
@@ -161,17 +161,17 @@ DOMSVGPointList::InternalListWillChangeT
 
   // If our length will decrease, notify the items that will be removed:
   for (uint32_t i = newLength; i < oldLength; ++i) {
     if (mItems[i]) {
       mItems[i]->RemovingFromList();
     }
   }
 
-  if (!mItems.SetLength(newLength)) {
+  if (!mItems.SetLength(newLength, fallible)) {
     // We silently ignore SetLength OOM failure since being out of sync is safe
     // so long as we have *fewer* items than our internal list.
     mItems.Clear();
     return;
   }
 
   // If our length has increased, null out the new pointers:
   for (uint32_t i = oldLength; i < newLength; ++i) {
@@ -301,17 +301,17 @@ DOMSVGPointList::InsertItemBefore(nsISVG
 
   nsCOMPtr<nsISVGPoint> domItem = &aNewItem;
   if (domItem->HasOwner() || domItem->IsReadonly() ||
       domItem->IsTranslatePoint()) {
     domItem = domItem->Copy(); // must do this before changing anything!
   }
 
   // Ensure we have enough memory so we can avoid complex error handling below:
-  if (!mItems.SetCapacity(mItems.Length() + 1) ||
+  if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
       !InternalList().SetCapacity(InternalList().Length() + 1)) {
     aError.Throw(NS_ERROR_OUT_OF_MEMORY);
     return nullptr;
   }
 
   AutoChangePointListNotifier notifier(this);
   // Now that we know we're inserting, keep animVal list in sync as necessary.
   MaybeInsertNullInAnimValListAt(aIndex);
--- a/dom/svg/DOMSVGTransformList.cpp
+++ b/dom/svg/DOMSVGTransformList.cpp
@@ -127,17 +127,17 @@ DOMSVGTransformList::InternalListLengthW
 
   // If our length will decrease, notify the items that will be removed:
   for (uint32_t i = aNewLength; i < oldLength; ++i) {
     if (mItems[i]) {
       mItems[i]->RemovingFromList();
     }
   }
 
-  if (!mItems.SetLength(aNewLength)) {
+  if (!mItems.SetLength(aNewLength, fallible)) {
     // We silently ignore SetLength OOM failure since being out of sync is safe
     // so long as we have *fewer* items than our internal list.
     mItems.Clear();
     return;
   }
 
   // If our length has increased, null out the new pointers:
   for (uint32_t i = oldLength; i < aNewLength; ++i) {
@@ -242,17 +242,17 @@ DOMSVGTransformList::InsertItemBefore(SV
   }
 
   nsRefPtr<SVGTransform> domItem = &newItem;
   if (newItem.HasOwner()) {
     domItem = newItem.Clone(); // must do this before changing anything!
   }
 
   // Ensure we have enough memory so we can avoid complex error handling below:
-  if (!mItems.SetCapacity(mItems.Length() + 1) ||
+  if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
       !InternalList().SetCapacity(InternalList().Length() + 1)) {
     error.Throw(NS_ERROR_OUT_OF_MEMORY);
     return nullptr;
   }
 
   AutoChangeTransformListNotifier notifier(this);
   // Now that we know we're inserting, keep animVal list in sync as necessary.
   MaybeInsertNullInAnimValListAt(index);
--- a/dom/svg/SVGLengthList.cpp
+++ b/dom/svg/SVGLengthList.cpp
@@ -13,17 +13,17 @@
 #include "SVGContentUtils.h"
 #include "SVGLength.h"
 
 namespace mozilla {
 
 nsresult
 SVGLengthList::CopyFrom(const SVGLengthList& rhs)
 {
-  if (!mLengths.SetCapacity(rhs.Length())) {
+  if (!mLengths.SetCapacity(rhs.Length(), fallible)) {
     // Yes, we do want fallible alloc here
     return NS_ERROR_OUT_OF_MEMORY;
   }
   mLengths = rhs.mLengths;
   return NS_OK;
 }
 
 void
--- a/dom/svg/SVGLengthList.h
+++ b/dom/svg/SVGLengthList.h
@@ -54,17 +54,17 @@ public:
 
   const SVGLength& operator[](uint32_t aIndex) const {
     return mLengths[aIndex];
   }
 
   bool operator==(const SVGLengthList& rhs) const;
 
   bool SetCapacity(uint32_t size) {
-    return mLengths.SetCapacity(size);
+    return mLengths.SetCapacity(size, fallible);
   }
 
   void Compact() {
     mLengths.Compact();
   }
 
   // Access to methods that can modify objects of this type is deliberately
   // limited. This is to reduce the chances of someone modifying objects of
@@ -85,17 +85,17 @@ protected:
     return mLengths[aIndex];
   }
 
   /**
    * This may fail (return false) on OOM if the internal capacity is being
    * increased, in which case the list will be left unmodified.
    */
   bool SetLength(uint32_t aNumberOfItems) {
-    return mLengths.SetLength(aNumberOfItems);
+    return mLengths.SetLength(aNumberOfItems, fallible);
   }
 
 private:
 
   // Marking the following private only serves to show which methods are only
   // used by our friend classes (as opposed to our subclasses) - it doesn't
   // really provide additional safety.
 
--- a/dom/svg/SVGMotionSMILType.cpp
+++ b/dom/svg/SVGMotionSMILType.cpp
@@ -194,17 +194,17 @@ SVGMotionSMILType::Assign(nsSMILValue& a
 {
   MOZ_ASSERT(aDest.mType == aSrc.mType, "Incompatible SMIL types");
   MOZ_ASSERT(aDest.mType == this, "Unexpected SMIL type");
 
   const MotionSegmentArray& srcArr = ExtractMotionSegmentArray(aSrc);
   MotionSegmentArray& dstArr = ExtractMotionSegmentArray(aDest);
 
   // Ensure we have sufficient memory.
-  if (!dstArr.SetCapacity(srcArr.Length())) {
+  if (!dstArr.SetCapacity(srcArr.Length(), fallible)) {
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   dstArr = srcArr; // Do the assignment.
   return NS_OK;
 }
 
 bool
--- a/dom/svg/SVGNumberList.cpp
+++ b/dom/svg/SVGNumberList.cpp
@@ -12,17 +12,17 @@
 #include "nsTextFormatter.h"
 #include "SVGContentUtils.h"
 
 namespace mozilla {
 
 nsresult
 SVGNumberList::CopyFrom(const SVGNumberList& rhs)
 {
-  if (!mNumbers.SetCapacity(rhs.Length())) {
+  if (!mNumbers.SetCapacity(rhs.Length(), fallible)) {
     // Yes, we do want fallible alloc here
     return NS_ERROR_OUT_OF_MEMORY;
   }
   mNumbers = rhs.mNumbers;
   return NS_OK;
 }
 
 void
--- a/dom/svg/SVGNumberList.h
+++ b/dom/svg/SVGNumberList.h
@@ -55,17 +55,17 @@ public:
     return mNumbers[aIndex];
   }
 
   bool operator==(const SVGNumberList& rhs) const {
     return mNumbers == rhs.mNumbers;
   }
 
   bool SetCapacity(uint32_t size) {
-    return mNumbers.SetCapacity(size);
+    return mNumbers.SetCapacity(size, fallible);
   }
 
   void Compact() {
     mNumbers.Compact();
   }
 
   // Access to methods that can modify objects of this type is deliberately
   // limited. This is to reduce the chances of someone modifying objects of
@@ -86,17 +86,17 @@ protected:
     return mNumbers[aIndex];
   }
 
   /**
    * This may fail (return false) on OOM if the internal capacity is being
    * increased, in which case the list will be left unmodified.
    */
   bool SetLength(uint32_t aNumberOfItems) {
-    return mNumbers.SetLength(aNumberOfItems);
+    return mNumbers.SetLength(aNumberOfItems, fallible);
   }
 
 private:
 
   // Marking the following private only serves to show which methods are only
   // used by our friend classes (as opposed to our subclasses) - it doesn't
   // really provide additional safety.
 
--- a/dom/svg/SVGPathData.cpp
+++ b/dom/svg/SVGPathData.cpp
@@ -29,17 +29,17 @@ static bool IsMoveto(uint16_t aSegType)
 {
   return aSegType == PATHSEG_MOVETO_ABS ||
          aSegType == PATHSEG_MOVETO_REL;
 }
 
 nsresult
 SVGPathData::CopyFrom(const SVGPathData& rhs)
 {
-  if (!mData.SetCapacity(rhs.mData.Length())) {
+  if (!mData.SetCapacity(rhs.mData.Length(), fallible)) {
     // Yes, we do want fallible alloc here
     return NS_ERROR_OUT_OF_MEMORY;
   }
   mData = rhs.mData;
   return NS_OK;
 }
 
 void
@@ -76,17 +76,17 @@ SVGPathData::SetValueFromString(const ns
   return pathParser.Parse() ? NS_OK : NS_ERROR_DOM_SYNTAX_ERR;
 }
 
 nsresult
 SVGPathData::AppendSeg(uint32_t aType, ...)
 {
   uint32_t oldLength = mData.Length();
   uint32_t newLength = oldLength + 1 + SVGPathSegUtils::ArgCountForType(aType);
-  if (!mData.SetLength(newLength)) {
+  if (!mData.SetLength(newLength, fallible)) {
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   mData[oldLength] = SVGPathSegUtils::EncodeType(aType);
   va_list args;
   va_start(args, aType);
   for (uint32_t i = oldLength + 1; i < newLength; ++i) {
     // NOTE! 'float' is promoted to 'double' when passed through '...'!
--- a/dom/svg/SVGPathData.h
+++ b/dom/svg/SVGPathData.h
@@ -130,17 +130,17 @@ public:
     // We use memcmp so that we don't need to worry that the data encoded in
     // the first float may have the same bit pattern as a NaN.
     return mData.Length() == rhs.mData.Length() &&
            memcmp(mData.Elements(), rhs.mData.Elements(),
                   mData.Length() * sizeof(float)) == 0;
   }
 
   bool SetCapacity(uint32_t aSize) {
-    return mData.SetCapacity(aSize);
+    return mData.SetCapacity(aSize, fallible);
   }
 
   void Compact() {
     mData.Compact();
   }
 
 
   float GetPathLength() const;
@@ -197,17 +197,17 @@ protected:
     return mData[aIndex];
   }
 
   /**
    * This may fail (return false) on OOM if the internal capacity is being
    * increased, in which case the list will be left unmodified.
    */
   bool SetLength(uint32_t aLength) {
-    return mData.SetLength(aLength);
+    return mData.SetLength(aLength, fallible);
   }
 
   nsresult SetValueFromString(const nsAString& aValue);
 
   void Clear() {
     mData.Clear();
   }
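
The operator== above compares the two float arrays with memcmp rather than element-wise float comparison because the first float of each encoded segment stores type data whose bit pattern may happen to be a NaN, and NaN never compares equal to itself. A small self-contained demonstration of the difference:

#include <cmath>
#include <cstdio>
#include <cstring>

int main() {
  // A NaN bit pattern, as might result from encoding non-numeric data in a float.
  float a = std::nanf("");
  float b = a;                       // identical bit pattern

  // Element-wise comparison: NaN != NaN, so identical data would look unequal.
  std::printf("a == b        -> %d\n", a == b);                     // prints 0

  // Byte-wise comparison: identical bit patterns compare equal, which is what
  // the memcmp-based operator== relies on.
  std::printf("memcmp(a, b)  -> %d\n",
              std::memcmp(&a, &b, sizeof(float)) == 0);             // prints 1
  return 0;
}
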
 
--- a/dom/svg/SVGPointList.h
+++ b/dom/svg/SVGPointList.h
@@ -63,17 +63,17 @@ public:
   bool operator==(const SVGPointList& rhs) const {
     // memcmp can be faster than |mItems == rhs.mItems|
     return mItems.Length() == rhs.mItems.Length() &&
            memcmp(mItems.Elements(), rhs.mItems.Elements(),
                   mItems.Length() * sizeof(SVGPoint)) == 0;
   }
 
   bool SetCapacity(uint32_t aSize) {
-    return mItems.SetCapacity(aSize);
+    return mItems.SetCapacity(aSize, fallible);
   }
 
   void Compact() {
     mItems.Compact();
   }
 
   // Access to methods that can modify objects of this type is deliberately
   // limited. This is to reduce the chances of someone modifying objects of
@@ -94,17 +94,17 @@ protected:
     return mItems[aIndex];
   }
 
   /**
    * This may fail (return false) on OOM if the internal capacity is being
    * increased, in which case the list will be left unmodified.
    */
   bool SetLength(uint32_t aNumberOfItems) {
-    return mItems.SetLength(aNumberOfItems);
+    return mItems.SetLength(aNumberOfItems, fallible);
   }
 
 private:
 
   // Marking the following private only serves to show which methods are only
   // used by our friend classes (as opposed to our subclasses) - it doesn't
   // really provide additional safety.
 
--- a/dom/svg/SVGStringList.cpp
+++ b/dom/svg/SVGStringList.cpp
@@ -11,17 +11,17 @@
 #include "nsWhitespaceTokenizer.h"
 #include "SVGContentUtils.h"
 
 namespace mozilla {
 
 nsresult
 SVGStringList::CopyFrom(const SVGStringList& rhs)
 {
-  if (!mStrings.SetCapacity(rhs.Length())) {
+  if (!mStrings.SetCapacity(rhs.Length(), fallible)) {
     // Yes, we do want fallible alloc here
     return NS_ERROR_OUT_OF_MEMORY;
   }
   mStrings = rhs.mStrings;
   mIsSet = true;
   return NS_OK;
 }
 
--- a/dom/svg/SVGStringList.h
+++ b/dom/svg/SVGStringList.h
@@ -51,17 +51,17 @@ public:
     return mStrings[aIndex];
   }
 
   bool operator==(const SVGStringList& rhs) const {
     return mStrings == rhs.mStrings;
   }
 
   bool SetCapacity(uint32_t size) {
-    return mStrings.SetCapacity(size);
+    return mStrings.SetCapacity(size, fallible);
   }
 
   void Compact() {
     mStrings.Compact();
   }
 
   // Returns true if the value of this stringlist has been explicitly
   // set by markup or a DOM call, false otherwise.
@@ -87,17 +87,17 @@ protected:
     return mStrings[aIndex];
   }
 
   /**
    * This may fail (return false) on OOM if the internal capacity is being
    * increased, in which case the list will be left unmodified.
    */
   bool SetLength(uint32_t aStringOfItems) {
-    return mStrings.SetLength(aStringOfItems);
+    return mStrings.SetLength(aStringOfItems, fallible);
   }
 
 private:
 
   // Marking the following private only serves to show which methods are only
   // used by our friend classes (as opposed to our subclasses) - it doesn't
   // really provide additional safety.
 
--- a/dom/svg/SVGTransformList.cpp
+++ b/dom/svg/SVGTransformList.cpp
@@ -38,17 +38,17 @@ nsresult
 SVGTransformList::CopyFrom(const SVGTransformList& rhs)
 {
   return CopyFrom(rhs.mItems);
 }
 
 nsresult
 SVGTransformList::CopyFrom(const nsTArray<nsSVGTransform>& aTransformArray)
 {
-  if (!mItems.SetCapacity(aTransformArray.Length())) {
+  if (!mItems.SetCapacity(aTransformArray.Length(), fallible)) {
     // Yes, we do want fallible alloc here
     return NS_ERROR_OUT_OF_MEMORY;
   }
   mItems = aTransformArray;
   return NS_OK;
 }
 
 void
--- a/dom/svg/SVGTransformList.h
+++ b/dom/svg/SVGTransformList.h
@@ -55,17 +55,17 @@ public:
     return mItems[aIndex];
   }
 
   bool operator==(const SVGTransformList& rhs) const {
     return mItems == rhs.mItems;
   }
 
   bool SetCapacity(uint32_t size) {
-    return mItems.SetCapacity(size);
+    return mItems.SetCapacity(size, fallible);
   }
 
   void Compact() {
     mItems.Compact();
   }
 
   gfxMatrix GetConsolidationMatrix() const;
 
@@ -89,17 +89,17 @@ protected:
     return mItems[aIndex];
   }
 
   /**
    * This may fail (return false) on OOM if the internal capacity is being
    * increased, in which case the list will be left unmodified.
    */
   bool SetLength(uint32_t aNumberOfItems) {
-    return mItems.SetLength(aNumberOfItems);
+    return mItems.SetLength(aNumberOfItems, fallible);
   }
 
 private:
 
   // Marking the following private only serves to show which methods are only
   // used by our friend classes (as opposed to our subclasses) - it doesn't
   // really provide additional safety.
 
--- a/dom/svg/SVGTransformListSMILType.cpp
+++ b/dom/svg/SVGTransformListSMILType.cpp
@@ -45,17 +45,17 @@ SVGTransformListSMILType::Assign(nsSMILV
   NS_PRECONDITION(aDest.mType == aSrc.mType, "Incompatible SMIL types");
   NS_PRECONDITION(aDest.mType == this, "Unexpected SMIL value");
 
   const TransformArray* srcTransforms =
     static_cast<const TransformArray*>(aSrc.mU.mPtr);
   TransformArray* dstTransforms = static_cast<TransformArray*>(aDest.mU.mPtr);
 
   // Before we assign, ensure we have sufficient memory
-  bool result = dstTransforms->SetCapacity(srcTransforms->Length());
+  bool result = dstTransforms->SetCapacity(srcTransforms->Length(), fallible);
   NS_ENSURE_TRUE(result, NS_ERROR_OUT_OF_MEMORY);
 
   *dstTransforms = *srcTransforms;
 
   return NS_OK;
 }
 
 bool
@@ -331,17 +331,17 @@ SVGTransformListSMILType::AppendTransfor
 bool
 SVGTransformListSMILType::AppendTransforms(const SVGTransformList& aList,
                                            nsSMILValue& aValue)
 {
   NS_PRECONDITION(aValue.mType == Singleton(), "Unexpected SMIL value type");
 
   TransformArray& transforms = *static_cast<TransformArray*>(aValue.mU.mPtr);
 
-  if (!transforms.SetCapacity(transforms.Length() + aList.Length()))
+  if (!transforms.SetCapacity(transforms.Length() + aList.Length(), fallible))
     return false;
 
   for (uint32_t i = 0; i < aList.Length(); ++i) {
     // No need to check the return value below since we have already allocated
     // the necessary space
     transforms.AppendElement(SVGTransformSMILData(aList[i]));
   }
   return true;
@@ -353,17 +353,17 @@ SVGTransformListSMILType::GetTransforms(
                                         FallibleTArray<nsSVGTransform>& aTransforms)
 {
   NS_PRECONDITION(aValue.mType == Singleton(), "Unexpected SMIL value type");
 
   const TransformArray& smilTransforms =
     *static_cast<const TransformArray*>(aValue.mU.mPtr);
 
   aTransforms.Clear();
-  if (!aTransforms.SetCapacity(smilTransforms.Length()))
+  if (!aTransforms.SetCapacity(smilTransforms.Length(), fallible))
       return false;
 
   for (uint32_t i = 0; i < smilTransforms.Length(); ++i) {
     // No need to check the return value below since we have already allocated
     // the necessary space
     aTransforms.AppendElement(smilTransforms[i].ToSVGTransform());
   }
   return true;
--- a/gfx/2d/2D.h
+++ b/gfx/2d/2D.h
@@ -150,17 +150,17 @@ struct DrawSurfaceOptions {
 
 };
 
 /**
  * This class is used to store gradient stops, it can only be used with a
  * matching DrawTarget. Not adhering to this condition will make a draw call
  * fail.
  */
-class GradientStops : public external::AtomicRefCounted<GradientStops>
+class GradientStops : public RefCounted<GradientStops>
 {
 public:
   MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(GradientStops)
   virtual ~GradientStops() {}
 
   virtual BackendType GetBackendType() const = 0;
   virtual bool IsValid() const { return true; }
 
@@ -313,17 +313,17 @@ public:
 class StoredPattern;
 class DrawTargetCaptureImpl;
 
 /**
  * This is the base class for source surfaces. These objects are surfaces
  * which may be used as a source in a SurfacePattern or a DrawSurface call.
  * They cannot be drawn to directly.
  */
-class SourceSurface : public external::AtomicRefCounted<SourceSurface>
+class SourceSurface : public RefCounted<SourceSurface>
 {
 public:
   MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(SourceSurface)
   virtual ~SourceSurface() {}
 
   virtual SurfaceType GetType() const = 0;
   virtual IntSize GetSize() const = 0;
   virtual SurfaceFormat GetFormat() const = 0;
@@ -471,17 +471,17 @@ public:
 };
 
 class PathBuilder;
 class FlattenedPath;
 
 /** The path class is used to create (sets of) figures of any shape that can be
  * filled or stroked to a DrawTarget
  */
-class Path : public external::AtomicRefCounted<Path>
+class Path : public RefCounted<Path>
 {
 public:
   MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(Path)
   virtual ~Path();
   
   virtual BackendType GetBackendType() const = 0;
 
   /** This returns a PathBuilder object that contains a copy of the contents of
@@ -572,17 +572,17 @@ struct GlyphBuffer
   const Glyph *mGlyphs; //!< A pointer to a buffer of glyphs. Managed by the caller.
   uint32_t mNumGlyphs;  //!< Number of glyphs mGlyphs points to.
 };
 
 /** This class is an abstraction of a backend/platform specific font object
  * at a particular size. It is passed into text drawing calls to describe
  * the font used for the drawing call.
  */
-class ScaledFont : public external::AtomicRefCounted<ScaledFont>
+class ScaledFont : public RefCounted<ScaledFont>
 {
 public:
   MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(ScaledFont)
   virtual ~ScaledFont() {}
 
   typedef void (*FontFileDataOutput)(const uint8_t *aData, uint32_t aLength, uint32_t aIndex, Float aGlyphSize, void *aBaton);
 
   virtual FontType GetType() const = 0;
@@ -617,17 +617,17 @@ protected:
 };
 
 /** This class is designed to allow passing additional glyph rendering
  * parameters to the glyph drawing functions. This is an empty wrapper class
  * merely used to allow holding on to and passing around platform specific
  * parameters. This is because different platforms have unique rendering
  * parameters.
  */
-class GlyphRenderingOptions : public external::AtomicRefCounted<GlyphRenderingOptions>
+class GlyphRenderingOptions : public RefCounted<GlyphRenderingOptions>
 {
 public:
   MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(GlyphRenderingOptions)
   virtual ~GlyphRenderingOptions() {}
 
   virtual FontType GetType() const = 0;
 
 protected:
@@ -636,17 +636,17 @@ protected:
 
 class DrawTargetCapture;
 
 /** This is the main class used for all the drawing. It is created through the
  * factory and accepts drawing commands. The results of drawing to a target
  * may be used either through a Snapshot or by flushing the target and directly
  * accessing the backing store a DrawTarget was created with.
  */
-class DrawTarget : public external::AtomicRefCounted<DrawTarget>
+class DrawTarget : public RefCounted<DrawTarget>
 {
 public:
   MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(DrawTarget)
   DrawTarget() : mTransformDirty(false), mPermitSubpixelAA(false) {}
   virtual ~DrawTarget() {}
 
   virtual DrawTargetType GetType() const = 0;
 
--- a/gfx/2d/DrawCommand.h
+++ b/gfx/2d/DrawCommand.h
@@ -143,17 +143,17 @@ private:
   DrawOptions mOptions;
 };
 
 class DrawFilterCommand : public DrawingCommand
 {
 public:
   DrawFilterCommand(FilterNode* aFilter, const Rect& aSourceRect,
                     const Point& aDestPoint, const DrawOptions& aOptions)
-    : DrawingCommand(CommandType::DRAWSURFACE)
+    : DrawingCommand(CommandType::DRAWFILTER)
     , mFilter(aFilter), mSourceRect(aSourceRect)
     , mDestPoint(aDestPoint), mOptions(aOptions)
   {
   }
 
   virtual void ExecuteOnDT(DrawTarget* aDT, const Matrix&)
   {
     aDT->DrawFilter(mFilter, mSourceRect, mDestPoint, mOptions);
@@ -161,46 +161,16 @@ public:
 
 private:
   RefPtr<FilterNode> mFilter;
   Rect mSourceRect;
   Point mDestPoint;
   DrawOptions mOptions;
 };
 
-class DrawSurfaceWithShadowCommand : public DrawingCommand
-{
-public:
-  DrawSurfaceWithShadowCommand(SourceSurface* aSurface, const Point& aDest,
-                               const Color& aColor, const Point& aOffset,
-                               Float aSigma, CompositionOp aOperator)
-    : DrawingCommand(CommandType::DRAWSURFACEWITHSHADOW)
-    , mSurface(aSurface)
-    , mDest(aDest)
-    , mColor(aColor)
-    , mOffset(aOffset)
-    , mSigma(aSigma)
-    , mOperator(aOperator)
-  {
-  }
-
-  virtual void ExecuteOnDT(DrawTarget* aDT, const Matrix&)
-  {
-    aDT->DrawSurfaceWithShadow(mSurface, mDest, mColor, mOffset, mSigma, mOperator);
-  }
-
-private:
-  RefPtr<SourceSurface> mSurface;
-  Point mDest;
-  Color mColor;
-  Point mOffset;
-  Float mSigma;
-  CompositionOp mOperator;
-};
-
 class ClearRectCommand : public DrawingCommand
 {
 public:
   explicit ClearRectCommand(const Rect& aRect)
     : DrawingCommand(CommandType::CLEARRECT)
     , mRect(aRect)
   {
   }
--- a/gfx/2d/DrawTargetCapture.cpp
+++ b/gfx/2d/DrawTargetCapture.cpp
@@ -25,17 +25,16 @@ DrawTargetCaptureImpl::~DrawTargetCaptur
 bool
 DrawTargetCaptureImpl::Init(const IntSize& aSize, DrawTarget* aRefDT)
 {
   if (!aRefDT) {
     return false;
   }
 
   mRefDT = aRefDT;
-  mFormat = mRefDT->GetFormat();
 
   mSize = aSize;
   return true;
 }
 
 TemporaryRef<SourceSurface>
 DrawTargetCaptureImpl::Snapshot()
 {
@@ -66,28 +65,16 @@ DrawTargetCaptureImpl::DrawFilter(Filter
                                   const DrawOptions &aOptions)
 {
   // @todo XXX - this won't work properly long term yet due to filternodes not
   // being immutable.
   AppendCommand(DrawFilterCommand)(aNode, aSourceRect, aDestPoint, aOptions);
 }
 
 void
-DrawTargetCaptureImpl::DrawSurfaceWithShadow(SourceSurface *aSurface,
-                                             const Point &aDest,
-                                             const Color &aColor,
-                                             const Point &aOffset,
-                                             Float aSigma,
-                                             CompositionOp aOperator)
-{
-  aSurface->GuaranteePersistance();
-  AppendCommand(DrawSurfaceWithShadowCommand)(aSurface, aDest, aColor, aOffset, aSigma, aOperator);
-}
-
-void
 DrawTargetCaptureImpl::ClearRect(const Rect &aRect)
 {
   AppendCommand(ClearRectCommand)(aRect);
 }
 
 void
 DrawTargetCaptureImpl::MaskSurface(const Pattern &aSource,
                                    SourceSurface *aMask,
@@ -186,17 +173,16 @@ DrawTargetCaptureImpl::PopClip()
 {
   AppendCommand(PopClipCommand)();
 }
 
 void
 DrawTargetCaptureImpl::SetTransform(const Matrix& aTransform)
 {
   AppendCommand(SetTransformCommand)(aTransform);
-  mTransform = aTransform;
 }
 
 void
 DrawTargetCaptureImpl::ReplayToDrawTarget(DrawTarget* aDT, const Matrix& aTransform)
 {
   uint8_t* start = &mDrawCommandStorage.front();
 
   uint8_t* current = start;
--- a/gfx/2d/DrawTargetCapture.h
+++ b/gfx/2d/DrawTargetCapture.h
@@ -40,17 +40,17 @@ public:
                           const Rect &aSourceRect,
                           const Point &aDestPoint,
                           const DrawOptions &aOptions = DrawOptions());
   virtual void DrawSurfaceWithShadow(SourceSurface *aSurface,
                                      const Point &aDest,
                                      const Color &aColor,
                                      const Point &aOffset,
                                      Float aSigma,
-                                     CompositionOp aOperator);
+                                     CompositionOp aOperator) { /* Not implemented */ }
 
   virtual void ClearRect(const Rect &aRect);
   virtual void MaskSurface(const Pattern &aSource,
                            SourceSurface *aMask,
                            Point aOffset,
                            const DrawOptions &aOptions = DrawOptions());
 
   virtual void CopySurface(SourceSurface *aSurface,
--- a/gfx/thebes/gfxContext.cpp
+++ b/gfx/thebes/gfxContext.cpp
@@ -456,17 +456,17 @@ gfxContext::SetDash(gfxFloat *dashes, in
 }
 
 bool
 gfxContext::CurrentDash(FallibleTArray<gfxFloat>& dashes, gfxFloat* offset) const
 {
   const AzureState &state = CurrentState();
   int count = state.strokeOptions.mDashLength;
 
-  if (count <= 0 || !dashes.SetLength(count)) {
+  if (count <= 0 || !dashes.SetLength(count, fallible)) {
     return false;
   }
 
   for (int i = 0; i < count; i++) {
     dashes[i] = state.dashPattern[i];
   }
 
   *offset = state.strokeOptions.mDashOffset;
--- a/gfx/thebes/gfxCoreTextShaper.cpp
+++ b/gfx/thebes/gfxCoreTextShaper.cpp
@@ -310,17 +310,17 @@ gfxCoreTextShaper::SetGlyphsFromRun(gfxS
     // We set the glyph index to NO_GLYPH for chars that have no associated glyph, and we
     // record the last glyph index for cases where the char maps to several glyphs,
     // so that our clumping will include all the glyph fragments for the character.
 
     // The charToGlyph array is indexed by char position within the stringRange of the glyph run.
 
     static const int32_t NO_GLYPH = -1;
     AutoFallibleTArray<int32_t,SMALL_GLYPH_RUN> charToGlyphArray;
-    if (!charToGlyphArray.SetLength(stringRange.length)) {
+    if (!charToGlyphArray.SetLength(stringRange.length, fallible)) {
         return NS_ERROR_OUT_OF_MEMORY;
     }
     int32_t *charToGlyph = charToGlyphArray.Elements();
     for (int32_t offset = 0; offset < stringRange.length; ++offset) {
         charToGlyph[offset] = NO_GLYPH;
     }
     for (int32_t i = 0; i < numGlyphs; ++i) {
         int32_t loc = glyphToChar[i] - stringRange.location;
--- a/gfx/thebes/gfxGraphiteShaper.cpp
+++ b/gfx/thebes/gfxGraphiteShaper.cpp
@@ -208,20 +208,20 @@ gfxGraphiteShaper::SetGlyphsFromSegment(
     uint32_t glyphCount = gr_seg_n_slots(aSegment);
 
     // identify clusters; graphite may have reordered/expanded/ligated glyphs.
     AutoFallibleTArray<Cluster,SMALL_GLYPH_RUN> clusters;
     AutoFallibleTArray<uint16_t,SMALL_GLYPH_RUN> gids;
     AutoFallibleTArray<float,SMALL_GLYPH_RUN> xLocs;
     AutoFallibleTArray<float,SMALL_GLYPH_RUN> yLocs;
 
-    if (!clusters.SetLength(aLength) ||
-        !gids.SetLength(glyphCount) ||
-        !xLocs.SetLength(glyphCount) ||
-        !yLocs.SetLength(glyphCount))
+    if (!clusters.SetLength(aLength, fallible) ||
+        !gids.SetLength(glyphCount, fallible) ||
+        !xLocs.SetLength(glyphCount, fallible) ||
+        !yLocs.SetLength(glyphCount, fallible))
     {
         return NS_ERROR_OUT_OF_MEMORY;
     }
 
     // walk through the glyph slots and check which original character
     // each is associated with
     uint32_t gIndex = 0; // glyph slot index
     uint32_t cIndex = 0; // current cluster index
--- a/gfx/thebes/gfxHarfBuzzShaper.cpp
+++ b/gfx/thebes/gfxHarfBuzzShaper.cpp
@@ -1529,17 +1529,17 @@ gfxHarfBuzzShaper::SetGlyphsFromRun(gfxC
         return NS_OK;
     }
 
     nsAutoTArray<gfxTextRun::DetailedGlyph,1> detailedGlyphs;
 
     uint32_t wordLength = aLength;
     static const int32_t NO_GLYPH = -1;
     AutoFallibleTArray<int32_t,SMALL_GLYPH_RUN> charToGlyphArray;
-    if (!charToGlyphArray.SetLength(wordLength)) {
+    if (!charToGlyphArray.SetLength(wordLength, fallible)) {
         return NS_ERROR_OUT_OF_MEMORY;
     }
 
     int32_t *charToGlyph = charToGlyphArray.Elements();
     for (uint32_t offset = 0; offset < wordLength; ++offset) {
         charToGlyph[offset] = NO_GLYPH;
     }
 
--- a/gfx/thebes/gfxUserFontSet.cpp
+++ b/gfx/thebes/gfxUserFontSet.cpp
@@ -365,17 +365,17 @@ CopyWOFFMetadata(const uint8_t* aFontDat
     uint32_t metaOffset = woff->metaOffset;
     uint32_t metaCompLen = woff->metaCompLen;
     if (!metaOffset || !metaCompLen || !woff->metaOrigLen) {
         return;
     }
     if (metaOffset >= aLength || metaCompLen > aLength - metaOffset) {
         return;
     }
-    if (!aMetadata->SetLength(woff->metaCompLen)) {
+    if (!aMetadata->SetLength(woff->metaCompLen, fallible)) {
         return;
     }
     memcpy(aMetadata->Elements(), aFontData + metaOffset, metaCompLen);
     *aMetaOrigLen = woff->metaOrigLen;
 }
 
 void
 gfxUserFontEntry::LoadNextSrc()
--- a/intl/hyphenation/nsHyphenator.cpp
+++ b/intl/hyphenation/nsHyphenator.cpp
@@ -41,17 +41,17 @@ nsHyphenator::IsValid()
 {
   return (mDict != nullptr);
 }
 
 nsresult
 nsHyphenator::Hyphenate(const nsAString& aString,
                         FallibleTArray<bool>& aHyphens)
 {
-  if (!aHyphens.SetLength(aString.Length())) {
+  if (!aHyphens.SetLength(aString.Length(), mozilla::fallible)) {
     return NS_ERROR_OUT_OF_MEMORY;
   }
   memset(aHyphens.Elements(), false, aHyphens.Length() * sizeof(bool));
 
   bool inWord = false;
   uint32_t wordStart = 0, wordLimit = 0;
   uint32_t chLen;
   for (uint32_t i = 0; i < aString.Length(); i += chLen) {
--- a/intl/uconv/nsConverterInputStream.cpp
+++ b/intl/uconv/nsConverterInputStream.cpp
@@ -40,18 +40,18 @@ nsConverterInputStream::Init(nsIInputStr
         encoding.Assign(label);
     } else if (!EncodingUtils::FindEncodingForLabelNoReplacement(label,
                                                                  encoding)) {
       return NS_ERROR_UCONV_NOCONV;
     }
     mConverter = EncodingUtils::DecoderForEncoding(encoding);
  
     // set up our buffers
-    if (!mByteData.SetCapacity(aBufferSize) ||
-        !mUnicharData.SetCapacity(aBufferSize)) {
+    if (!mByteData.SetCapacity(aBufferSize, mozilla::fallible) ||
+        !mUnicharData.SetCapacity(aBufferSize, mozilla::fallible)) {
       return NS_ERROR_OUT_OF_MEMORY;
     }
 
     mInput = aStream;
     mReplacementChar = aReplacementChar;
     if (!aReplacementChar ||
         aReplacementChar != mConverter->GetCharacterForUnMapped()) {
         mConverter->SetInputErrorBehavior(nsIUnicodeDecoder::kOnError_Signal);
--- a/ipc/glue/IPCMessageUtils.h
+++ b/ipc/glue/IPCMessageUtils.h
@@ -515,17 +515,17 @@ struct ParamTraits<FallibleTArray<E> >
 
       E* elements = aResult->AppendElements(length);
       if (!elements) {
         return false;
       }
 
       memcpy(elements, outdata, pickledLength);
     } else {
-      if (!aResult->SetCapacity(length)) {
+      if (!aResult->SetCapacity(length, mozilla::fallible)) {
         return false;
       }
 
       for (uint32_t index = 0; index < length; index++) {
         E* element = aResult->AppendElement();
         MOZ_ASSERT(element);
         if (!ReadParam(aMsg, aIter, element)) {
           return false;
--- a/ipc/ipdl/ipdl/lower.py
+++ b/ipc/ipdl/ipdl/lower.py
@@ -338,17 +338,17 @@ def _cxxArrayType(basetype, const=0, ref
 def _cxxFallibleArrayType(basetype, const=0, ref=0):
     return Type('FallibleTArray', T=basetype, const=const, ref=ref)
 
 def _callCxxArrayLength(arr):
     return ExprCall(ExprSelect(arr, '.', 'Length'))
 
 def _callCxxCheckedArraySetLength(arr, lenexpr, sel='.'):
     ifbad = StmtIf(ExprNot(ExprCall(ExprSelect(arr, sel, 'SetLength'),
-                                    args=[ lenexpr ])))
+                                    args=[ lenexpr, ExprVar('mozilla::fallible') ])))
     ifbad.addifstmt(_fatalError('Error setting the array length'))
     ifbad.addifstmt(StmtReturn.FALSE)
     return ifbad
 
 def _callCxxSwapArrayElements(arr1, arr2, sel='.'):
     return ExprCall(ExprSelect(arr1, sel, 'SwapElements'),
                     args=[ arr2 ])
 
--- a/js/src/jit-test/tests/saved-stacks/async-max-frame-count.js
+++ b/js/src/jit-test/tests/saved-stacks/async-max-frame-count.js
@@ -8,17 +8,24 @@ function recur(n, limit) {
                                       saveStack(limit), "Recurse");
   }
   return saveStack(limit);
 }
 
 function checkRecursion(n, limit) {
   print("checkRecursion(" + uneval(n) + ", " + uneval(limit) + ")");
 
-  var stack = recur(n, limit);
+  try {
+    var stack = recur(n, limit);
+  } catch (e) {
+    // Some platforms, like ASAN builds, can end up overrecursing. Tolerate
+    // these failures.
+    assertEq(/too much recursion/.test("" + e), true);
+    return;
+  }
 
   // Async stacks are limited even if we didn't ask for a limit. There is a
   // default limit on frames attached on top of any synchronous frames. In this
   // case the synchronous frame is the last call to `recur`.
   if (limit == 0) {
     limit = defaultAsyncStackLimit + 1;
   }
 
--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -7,113 +7,824 @@
 #include "jit/BacktrackingAllocator.h"
 #include "jit/BitSet.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::DebugOnly;
 
+/////////////////////////////////////////////////////////////////////
+// Utility
+/////////////////////////////////////////////////////////////////////
+
+static inline bool
+SortBefore(UsePosition* a, UsePosition* b)
+{
+    return a->pos <= b->pos;
+}
+
+static inline bool
+SortBefore(LiveRange::BundleLink* a, LiveRange::BundleLink* b)
+{
+    LiveRange* rangea = LiveRange::get(a);
+    LiveRange* rangeb = LiveRange::get(b);
+    MOZ_ASSERT(!rangea->intersects(rangeb));
+    return rangea->from() < rangeb->from();
+}
+
+static inline bool
+SortBefore(LiveRange::RegisterLink* a, LiveRange::RegisterLink* b)
+{
+    return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
+}
+
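+// Insert |value| into |list| while keeping the list ordered according to the
+// matching SortBefore() overload above. The common append case is handled by
+// checking the back of the list first; otherwise we do a linear walk from the
+// front to find the insertion point.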
+template <typename T>
+static inline void
+InsertSortedList(InlineForwardList<T> &list, T* value)
+{
+    if (list.empty()) {
+        list.pushFront(value);
+        return;
+    }
+
+    if (SortBefore(list.back(), value)) {
+        list.pushBack(value);
+        return;
+    }
+
+    T* prev = nullptr;
+    for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
+        if (SortBefore(value, *iter))
+            break;
+        prev = *iter;
+    }
+
+    if (prev)
+        list.insertAfter(prev, value);
+    else
+        list.pushFront(value);
+}
+
+/////////////////////////////////////////////////////////////////////
+// LiveRange
+/////////////////////////////////////////////////////////////////////
+
+void
+LiveRange::addUse(UsePosition* use)
+{
+    MOZ_ASSERT(covers(use->pos));
+    InsertSortedList(uses_, use);
+}
+
+void
+LiveRange::distributeUses(LiveRange* other)
+{
+    MOZ_ASSERT(other->vreg() == vreg());
+    MOZ_ASSERT(this != other);
+
+    // Move over all uses which fit in |other|'s boundaries.
+    for (UsePositionIterator iter = usesBegin(); iter; ) {
+        UsePosition* use = *iter;
+        if (other->covers(use->pos)) {
+            uses_.removeAndIncrement(iter);
+            other->addUse(use);
+        } else {
+            iter++;
+        }
+    }
+
+    // Distribute the definition to |other| as well, if possible.
+    if (hasDefinition() && from() == other->from())
+        other->setHasDefinition();
+}
+
+bool
+LiveRange::contains(LiveRange* other) const
+{
+    return from() <= other->from() && to() >= other->to();
+}
+
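+// Compute the intersection of this range with |other|: |pre| receives the
+// piece strictly before |other|, |inside| the overlapping piece, and |post|
+// the piece strictly after. Outputs that do not apply are left empty.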
+void
+LiveRange::intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const
+{
+    MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
+
+    CodePosition innerFrom = from();
+    if (from() < other->from()) {
+        if (to() < other->from()) {
+            *pre = range_;
+            return;
+        }
+        *pre = Range(from(), other->from());
+        innerFrom = other->from();
+    }
+
+    CodePosition innerTo = to();
+    if (to() > other->to()) {
+        if (from() >= other->to()) {
+            *post = range_;
+            return;
+        }
+        *post = Range(other->to(), to());
+        innerTo = other->to();
+    }
+
+    if (innerFrom != innerTo)
+        *inside = Range(innerFrom, innerTo);
+}
+
+bool
+LiveRange::intersects(LiveRange* other) const
+{
+    Range pre, inside, post;
+    intersect(other, &pre, &inside, &post);
+    return !inside.empty();
+}
+
+/////////////////////////////////////////////////////////////////////
+// SpillSet
+/////////////////////////////////////////////////////////////////////
+
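+// Propagate the allocation chosen for this spill set to every bundle that has
+// been spilled to it.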
+void
+SpillSet::setAllocation(LAllocation alloc)
+{
+    for (size_t i = 0; i < numSpilledBundles(); i++)
+        spilledBundle(i)->setAllocation(alloc);
+}
+
+/////////////////////////////////////////////////////////////////////
+// LiveBundle
+/////////////////////////////////////////////////////////////////////
+
+#ifdef DEBUG
+size_t
+LiveBundle::numRanges() const
+{
+    size_t count = 0;
+    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++)
+        count++;
+    return count;
+}
+#endif // DEBUG
+
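+// Return the range in this bundle covering |pos|, or null if no range does.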
+LiveRange*
+LiveBundle::rangeFor(CodePosition pos) const
+{
+    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (range->covers(pos))
+            return range;
+    }
+    return nullptr;
+}
+
+void
+LiveBundle::addRange(LiveRange* range)
+{
+    MOZ_ASSERT(!range->bundle());
+    range->setBundle(this);
+    InsertSortedList(ranges_, &range->bundleLink);
+}
+
+bool
+LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to)
+{
+    LiveRange* range = LiveRange::New(alloc, vreg, from, to);
+    if (!range)
+        return false;
+    addRange(range);
+    return true;
+}
+
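+// Add a new range [from,to) for |oldRange|'s vreg to this bundle, and move
+// over any uses of |oldRange| that the new range covers.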
+bool
+LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
+                                      CodePosition from, CodePosition to)
+{
+    LiveRange* range = LiveRange::New(alloc, oldRange->vreg(), from, to);
+    if (!range)
+        return false;
+    addRange(range);
+    oldRange->distributeUses(range);
+    return true;
+}
+
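+// Detach and return the first range in this bundle, clearing its bundle
+// pointer; returns null if the bundle has no ranges.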
+LiveRange*
+LiveBundle::popFirstRange()
+{
+    LiveRange::BundleLinkIterator iter = rangesBegin();
+    if (!iter)
+        return nullptr;
+
+    LiveRange* range = LiveRange::get(*iter);
+    ranges_.removeAt(iter);
+
+    range->setBundle(nullptr);
+    return range;
+}
+
+void
+LiveBundle::removeRange(LiveRange* range)
+{
+    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+        LiveRange* existing = LiveRange::get(*iter);
+        if (existing == range) {
+            ranges_.removeAt(iter);
+            return;
+        }
+    }
+    MOZ_CRASH();
+}
+
+/////////////////////////////////////////////////////////////////////
+// VirtualRegister
+/////////////////////////////////////////////////////////////////////
+
+bool
+VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to)
+{
+    MOZ_ASSERT(from < to);
+
+    // Mark [from,to) as a live range for this register during the initial
+    // liveness analysis, coalescing with any existing overlapping ranges.
+
+    LiveRange* prev = nullptr;
+    LiveRange* merged = nullptr;
+    for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter; ) {
+        LiveRange* existing = LiveRange::get(*iter);
+
+        if (from > existing->to()) {
+            // The new range should go after this one.
+            prev = existing;
+            iter++;
+            continue;
+        }
+
+        if (to.next() < existing->from()) {
+            // The new range should go before this one.
+            break;
+        }
+
+        if (!merged) {
+            // This is the first old range we've found that overlaps the new
+            // range. Extend this one to cover its union with the new range.
+            merged = existing;
+
+            if (from < existing->from())
+                existing->setFrom(from);
+            if (to > existing->to())
+                existing->setTo(to);
+
+            // Continue searching to see if any other old ranges can be
+            // coalesced with the new merged range.
+            iter++;
+            continue;
+        }
+
+        // Coalesce this range into the previous range we merged into.
+        MOZ_ASSERT(existing->from() >= merged->from());
+        if (existing->to() > merged->to())
+            merged->setTo(existing->to());
+
+        MOZ_ASSERT(!existing->hasDefinition());
+        existing->distributeUses(merged);
+        MOZ_ASSERT(!existing->hasUses());
+
+        ranges_.removeAndIncrement(iter);
+    }
+
+    if (!merged) {
+        // The new range does not overlap any existing range for the vreg.
+        LiveRange* range = LiveRange::New(alloc, vreg(), from, to);
+        if (!range)
+            return false;
+
+        if (prev)
+            ranges_.insertAfter(&prev->registerLink, &range->registerLink);
+        else
+            ranges_.pushFront(&range->registerLink);
+    }
+
+    return true;
+}
+
+void
+VirtualRegister::addInitialUse(UsePosition* use)
+{
+    LiveRange::get(*rangesBegin())->addUse(use);
+}
+
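+// Trim the start of this register's first range to its definition point and
+// flag that range as carrying the definition.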
+void
+VirtualRegister::setInitialDefinition(CodePosition from)
+{
+    LiveRange* first = LiveRange::get(*rangesBegin());
+    MOZ_ASSERT(from >= first->from());
+    first->setFrom(from);
+    first->setHasDefinition();
+}
+
+LiveRange*
+VirtualRegister::rangeFor(CodePosition pos) const
+{
+    for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (range->covers(pos))
+            return range;
+    }
+    return nullptr;
+}
+
+void
+VirtualRegister::addRange(LiveRange* range)
+{
+    InsertSortedList(ranges_, &range->registerLink);
+}
+
+void
+VirtualRegister::removeRange(LiveRange* range)
+{
+    for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
+        LiveRange* existing = LiveRange::get(*iter);
+        if (existing == range) {
+            ranges_.removeAt(iter);
+            return;
+        }
+    }
+    MOZ_CRASH();
+}
+
+/////////////////////////////////////////////////////////////////////
+// BacktrackingAllocator
+/////////////////////////////////////////////////////////////////////
+
+// This function pre-allocates and initializes as much global state as possible
+// to avoid littering the algorithms with memory management cruft.
 bool
 BacktrackingAllocator::init()
 {
+    if (!RegisterAllocator::init())
+        return false;
+
+    liveIn = mir->allocate<BitSet>(graph.numBlockIds());
+    if (!liveIn)
+        return false;
+
+    callRanges = LiveBundle::New(alloc(), nullptr, nullptr);
+    if (!callRanges)
+        return false;
+
+    size_t numVregs = graph.numVirtualRegisters();
+    if (!vregs.init(mir->alloc(), numVregs))
+        return false;
+    memset(&vregs[0], 0, sizeof(VirtualRegister) * numVregs);
+    for (uint32_t i = 0; i < numVregs; i++)
+        new(&vregs[i]) VirtualRegister();
+
+    // Build virtual register objects.
+    for (size_t i = 0; i < graph.numBlocks(); i++) {
+        if (mir->shouldCancel("Create data structures (main loop)"))
+            return false;
+
+        LBlock* block = graph.getBlock(i);
+        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
+            for (size_t j = 0; j < ins->numDefs(); j++) {
+                LDefinition* def = ins->getDef(j);
+                if (def->isBogusTemp())
+                    continue;
+                vreg(def).init(*ins, def, /* isTemp = */ false);
+            }
+
+            for (size_t j = 0; j < ins->numTemps(); j++) {
+                LDefinition* def = ins->getTemp(j);
+                if (def->isBogusTemp())
+                    continue;
+                vreg(def).init(*ins, def, /* isTemp = */ true);
+            }
+        }
+        for (size_t j = 0; j < block->numPhis(); j++) {
+            LPhi* phi = block->getPhi(j);
+            LDefinition* def = phi->getDef(0);
+            vreg(def).init(phi, def, /* isTemp = */ false);
+        }
+    }
+
     LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
     while (!remainingRegisters.emptyGeneral()) {
         AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
         registers[reg.code()].allocatable = true;
     }
     while (!remainingRegisters.emptyFloat()) {
         AnyRegister reg = AnyRegister(remainingRegisters.takeAnyFloat());
         registers[reg.code()].allocatable = true;
     }
 
     LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
     for (size_t i = 0; i < AnyRegister::Total; i++) {
         registers[i].reg = AnyRegister::FromCode(i);
         registers[i].allocations.setAllocator(lifoAlloc);
-
-        LiveInterval* fixed = fixedIntervals[i];
-        for (size_t j = 0; j < fixed->numRanges(); j++) {
-            AllocatedRange range(fixed, fixed->getRange(j));
-            if (!registers[i].allocations.insert(range))
-                return false;
-        }
     }
 
     hotcode.setAllocator(lifoAlloc);
 
     // Partition the graph into hot and cold sections, for helping to make
     // splitting decisions. Since we don't have any profiling data this is a
     // crapshoot, so just mark the bodies of inner loops as hot and everything
     // else as cold.
 
-    LiveInterval* hotcodeInterval = LiveInterval::New(alloc(), 0);
-
     LBlock* backedge = nullptr;
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock* block = graph.getBlock(i);
 
         // If we see a loop header, mark the backedge so we know when we have
         // hit the end of the loop. Don't process the loop immediately, so that
         // if there is an inner loop we will ignore the outer backedge.
         if (block->mir()->isLoopHeader())
             backedge = block->mir()->backedge()->lir();
 
         if (block == backedge) {
             LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
-            CodePosition from = entryOf(header);
-            CodePosition to = exitOf(block).next();
-            if (!hotcodeInterval->addRange(from, to))
+            LiveRange* range = LiveRange::New(alloc(), 0, entryOf(header), exitOf(block).next());
+            if (!range || !hotcode.insert(range))
                 return false;
         }
     }
 
-    for (size_t i = 0; i < hotcodeInterval->numRanges(); i++) {
-        AllocatedRange range(hotcodeInterval, hotcodeInterval->getRange(i));
-        if (!hotcode.insert(range))
+    return true;
+}
+
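+// Record that physical register |reg| is unavailable over [from,to) by adding
+// a fixed range to its allocation set.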
+bool
+BacktrackingAllocator::addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to)
+{
+    LiveRange* range = LiveRange::New(alloc(), 0, from, to);
+    return range && registers[reg.code()].allocations.insert(range);
+}
+
+#ifdef DEBUG
+// Returns true iff ins has a def/temp reusing the input allocation.
+static bool
+IsInputReused(LInstruction* ins, LUse* use)
+{
+    for (size_t i = 0; i < ins->numDefs(); i++) {
+        if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
+        {
+            return true;
+        }
+    }
+
+    for (size_t i = 0; i < ins->numTemps(); i++) {
+        if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+#endif
+
+/*
+ * This function builds up liveness ranges for all virtual registers
+ * defined in the function. Additionally, it populates the liveIn array with
+ * information about which registers are live at the beginning of a block, to
+ * aid resolution and reification in a later phase.
+ *
+ * The algorithm is based on the one published in:
+ *
+ * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
+ *     SSA Form." Proceedings of the International Symposium on Code Generation
+ *     and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
+ *
+ * The algorithm operates on blocks ordered such that dominators of a block
+ * are before the block itself, and such that all blocks of a loop are
+ * contiguous. It proceeds backwards over the instructions in this order,
+ * marking registers live at their uses, ending their live ranges at
+ * definitions, and recording which registers are live at the top of every
+ * block. To deal with loop backedges, registers live at the beginning of
+ * a loop gain a range covering the entire loop.
+ */
+bool
+BacktrackingAllocator::buildLivenessInfo()
+{
+    JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
+
+    Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
+    BitSet loopDone(graph.numBlockIds());
+    if (!loopDone.init(alloc()))
+        return false;
+
+    for (size_t i = graph.numBlocks(); i > 0; i--) {
+        if (mir->shouldCancel("Build Liveness Info (main loop)"))
+            return false;
+
+        LBlock* block = graph.getBlock(i - 1);
+        MBasicBlock* mblock = block->mir();
+
+        BitSet& live = liveIn[mblock->id()];
+        new (&live) BitSet(graph.numVirtualRegisters());
+        if (!live.init(alloc()))
             return false;
+
+        // Propagate liveIn from our successors to us.
+        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
+            MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
+            // Skip backedges, as we fix them up at the loop header.
+            if (mblock->id() < successor->id())
+                live.insertAll(liveIn[successor->id()]);
+        }
+
+        // Add successor phis.
+        if (mblock->successorWithPhis()) {
+            LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
+            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
+                LPhi* phi = phiSuccessor->getPhi(j);
+                LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
+                uint32_t reg = use->toUse()->virtualRegister();
+                live.insert(reg);
+            }
+        }
+
+        // Registers are assumed alive for the entire block; a definition
+        // shortens the range to the point of definition.
+        for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
+            if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block), exitOf(block).next()))
+                return false;
+        }
+
+        // Shorten the front end of ranges for live variables to their point of
+        // definition, if found.
+        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
+            // Calls may clobber registers, so force a spill and reload around the callsite.
+            if (ins->isCall()) {
+                for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); iter++) {
+                    bool found = false;
+                    for (size_t i = 0; i < ins->numDefs(); i++) {
+                        if (ins->getDef(i)->isFixed() &&
+                            ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
+                            found = true;
+                            break;
+                        }
+                    }
+                    if (!found) {
+                        if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next()))
+                            return false;
+                    }
+                }
+                if (!callRanges->addRange(alloc(), 0, outputOf(*ins), outputOf(*ins).next()))
+                    return false;
+            }
+            DebugOnly<bool> hasDoubleDef = false;
+            DebugOnly<bool> hasFloat32Def = false;
+            for (size_t i = 0; i < ins->numDefs(); i++) {
+                LDefinition* def = ins->getDef(i);
+                if (def->isBogusTemp())
+                    continue;
+#ifdef DEBUG
+                if (def->type() == LDefinition::DOUBLE)
+                    hasDoubleDef = true;
+                if (def->type() == LDefinition::FLOAT32)
+                    hasFloat32Def = true;
+#endif
+                CodePosition from = outputOf(*ins);
+
+                if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
+                    // MUST_REUSE_INPUT is implemented by allocating an output
+                    // register and moving the input to it. Register hints are
+                    // used to avoid unnecessary moves. We give the input an
+                    // LUse::ANY policy to avoid allocating a register for the
+                    // input.
+                    LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
+                    MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
+                    MOZ_ASSERT(inputUse->usedAtStart());
+                    *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
+                }
+
+                if (!vreg(def).addInitialRange(alloc(), from, from.next()))
+                    return false;
+                vreg(def).setInitialDefinition(from);
+                live.remove(def->virtualRegister());
+            }
+
+            for (size_t i = 0; i < ins->numTemps(); i++) {
+                LDefinition* temp = ins->getTemp(i);
+                if (temp->isBogusTemp())
+                    continue;
+
+                // Normally temps are considered to cover both the input
+                // and output of the associated instruction. In some cases
+                // though we want to use a fixed register as both an input
+                // and clobbered register in the instruction, so watch for
+                // this and shorten the temp to cover only the output.
+                CodePosition from = inputOf(*ins);
+                if (temp->policy() == LDefinition::FIXED) {
+                    AnyRegister reg = temp->output()->toRegister();
+                    for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
+                        if (alloc->isUse()) {
+                            LUse* use = alloc->toUse();
+                            if (use->isFixedRegister()) {
+                                if (GetFixedRegister(vreg(use).def(), use) == reg)
+                                    from = outputOf(*ins);
+                            }
+                        }
+                    }
+                }
+
+                CodePosition to =
+                    ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
+
+                if (!vreg(temp).addInitialRange(alloc(), from, to))
+                    return false;
+                vreg(temp).setInitialDefinition(from);
+            }
+
+            DebugOnly<bool> hasUseRegister = false;
+            DebugOnly<bool> hasUseRegisterAtStart = false;
+
+            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
+                if (inputAlloc->isUse()) {
+                    LUse* use = inputAlloc->toUse();
+
+                    // Call uses should always be at-start or fixed, since
+                    // calls use all registers.
+                    MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
+                                  use->isFixedRegister() || use->usedAtStart());
+
+#ifdef DEBUG
+                    // Don't allow at-start call uses if there are temps of the same kind,
+                    // so that we don't assign the same register.
+                    if (ins->isCall() && use->usedAtStart()) {
+                        for (size_t i = 0; i < ins->numTemps(); i++)
+                            MOZ_ASSERT(vreg(ins->getTemp(i)).type() != vreg(use).type());
+                    }
+
+                    // If there are both useRegisterAtStart(x) and useRegister(y)
+                    // uses, we may assign the same register to both operands
+                    // (bug 772830). Don't allow this for now.
+                    if (use->policy() == LUse::REGISTER) {
+                        if (use->usedAtStart()) {
+                            if (!IsInputReused(*ins, use))
+                                hasUseRegisterAtStart = true;
+                        } else {
+                            hasUseRegister = true;
+                        }
+                    }
+                    MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
+#endif
+
+                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
+                    if (use->policy() == LUse::RECOVERED_INPUT)
+                        continue;
+
+                    // Fixed uses on calls are specially overridden to happen
+                    // at the input position.
+                    CodePosition to =
+                        (use->usedAtStart() || (ins->isCall() && use->isFixedRegister()))
+                        ? inputOf(*ins)
+                        : outputOf(*ins);
+                    if (use->isFixedRegister()) {
+                        LAllocation reg(AnyRegister::FromCode(use->registerCode()));
+                        for (size_t i = 0; i < ins->numDefs(); i++) {
+                            LDefinition* def = ins->getDef(i);
+                            if (def->policy() == LDefinition::FIXED && *def->output() == reg)
+                                to = inputOf(*ins);
+                        }
+                    }
+
+                    if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next()))
+                        return false;
+                    UsePosition* usePosition = new(alloc()) UsePosition(use, to);
+                    if (!usePosition)
+                        return false;
+                    vreg(use).addInitialUse(usePosition);
+                    live.insert(use->virtualRegister());
+                }
+            }
+        }
+
+        // Phis have simultaneous assignment semantics at block begin, so at
+        // the beginning of the block we can be sure that liveIn does not
+        // contain any phi outputs.
+        for (unsigned int i = 0; i < block->numPhis(); i++) {
+            LDefinition* def = block->getPhi(i)->getDef(0);
+            if (live.contains(def->virtualRegister())) {
+                live.remove(def->virtualRegister());
+            } else {
+                // This is a dead phi, so add a dummy range over all phis. This
+                // can go away if we have an earlier dead code elimination pass.
+                CodePosition entryPos = entryOf(block);
+                if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next()))
+                    return false;
+            }
+        }
+
+        if (mblock->isLoopHeader()) {
+            // A divergence from the published algorithm is required here, as
+            // our block order does not guarantee that blocks of a loop are
+            // contiguous. As a result, a single live range spanning the
+            // loop is not possible. Additionally, we require liveIn in a later
+            // pass for resolution, so that must also be fixed up here.
+            MBasicBlock* loopBlock = mblock->backedge();
+            while (true) {
+                // Blocks must already have been visited to have a liveIn set.
+                MOZ_ASSERT(loopBlock->id() >= mblock->id());
+
+                // Add a range for this entire loop block
+                CodePosition from = entryOf(loopBlock->lir());
+                CodePosition to = exitOf(loopBlock->lir()).next();
+
+                for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
+                    if (!vregs[*liveRegId].addInitialRange(alloc(), from, to))
+                        return false;
+                }
+
+                // Fix up the liveIn set.
+                liveIn[loopBlock->id()].insertAll(live);
+
+                // Make sure we don't visit this node again
+                loopDone.insert(loopBlock->id());
+
+                // If this is the loop header, any predecessors are either the
+                // backedge or out of the loop, so skip any predecessors of
+                // this block
+                if (loopBlock != mblock) {
+                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
+                        MBasicBlock* pred = loopBlock->getPredecessor(i);
+                        if (loopDone.contains(pred->id()))
+                            continue;
+                        if (!loopWorkList.append(pred))
+                            return false;
+                    }
+                }
+
+                // Terminate loop if out of work.
+                if (loopWorkList.empty())
+                    break;
+
+                // Grab the next block off the work list, skipping any OSR block.
+                MBasicBlock* osrBlock = graph.mir().osrBlock();
+                while (!loopWorkList.empty()) {
+                    loopBlock = loopWorkList.popCopy();
+                    if (loopBlock != osrBlock)
+                        break;
+                }
+
+                // If we emptied the work list without finding a non-OSR block, we are done.
+                if (loopBlock == osrBlock) {
+                    MOZ_ASSERT(loopWorkList.empty());
+                    break;
+                }
+            }
+
+            // Clear the done set for other loops
+            loopDone.clear();
+        }
+
+        MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
     }
 
+    JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");
+
+    if (JitSpewEnabled(JitSpew_RegAlloc))
+        dumpInstructions();
+
     return true;
 }
 
 bool
 BacktrackingAllocator::go()
 {
     JitSpew(JitSpew_RegAlloc, "Beginning register allocation");
 
-    if (!buildLivenessInfo())
+    if (!init())
         return false;
 
-    if (!init())
+    if (!buildLivenessInfo())
         return false;
 
     if (JitSpewEnabled(JitSpew_RegAlloc))
         dumpFixedRanges();
 
     if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
         return false;
 
     JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers");
-    if (!groupAndQueueRegisters())
+    if (!mergeAndQueueRegisters())
         return false;
-    JitSpew(JitSpew_RegAlloc, "Grouping and queueing registers complete");
 
     if (JitSpewEnabled(JitSpew_RegAlloc))
-        dumpRegisterGroups();
+        dumpVregs();
 
     JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");
 
-    // Allocate, spill and split register intervals until finished.
+    // Allocate, spill and split bundles until finished.
     while (!allocationQueue.empty()) {
         if (mir->shouldCancel("Backtracking Allocation"))
             return false;
 
         QueueItem item = allocationQueue.removeHighest();
-        if (item.interval ? !processInterval(item.interval) : !processGroup(item.group))
+        if (!processBundle(item.bundle))
             return false;
     }
     JitSpew(JitSpew_RegAlloc, "Main allocation loop complete");
 
     if (!pickStackSlots())
         return false;
 
     if (JitSpewEnabled(JitSpew_RegAlloc))
@@ -130,733 +841,572 @@ BacktrackingAllocator::go()
 
     if (!annotateMoveGroups())
         return false;
 
     return true;
 }
 
 static bool
-LifetimesOverlap(BacktrackingVirtualRegister* reg0, BacktrackingVirtualRegister* reg1)
-{
-    // Registers may have been eagerly split in two, see tryGroupReusedRegister.
-    // In such cases, only consider the first interval.
-    MOZ_ASSERT(reg0->numIntervals() <= 2 && reg1->numIntervals() <= 2);
-
-    LiveInterval* interval0 = reg0->getInterval(0);
-    LiveInterval* interval1 = reg1->getInterval(0);
-
-    // Interval ranges are sorted in reverse order. The lifetimes overlap if
-    // any of their ranges overlap.
-    size_t index0 = 0, index1 = 0;
-    while (index0 < interval0->numRanges() && index1 < interval1->numRanges()) {
-        const LiveInterval::Range
-            *range0 = interval0->getRange(index0),
-            *range1 = interval1->getRange(index1);
-        if (range0->from >= range1->to)
-            index0++;
-        else if (range1->from >= range0->to)
-            index1++;
-        else
-            return true;
-    }
-
-    return false;
-}
-
-bool
-BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup* group, BacktrackingVirtualRegister* reg)
-{
-    for (size_t i = 0; i < group->registers.length(); i++) {
-        if (LifetimesOverlap(reg, &vregs[group->registers[i]]))
-            return false;
-    }
-    return true;
-}
-
-static bool
 IsArgumentSlotDefinition(LDefinition* def)
 {
     return def->policy() == LDefinition::FIXED && def->output()->isArgument();
 }
 
 static bool
 IsThisSlotDefinition(LDefinition* def)
 {
     return IsArgumentSlotDefinition(def) &&
-           def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value);
+        def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value);
 }
 
 bool
-BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
+BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1)
 {
-    // See if reg0 and reg1 can be placed in the same group, following the
-    // restrictions imposed by VirtualRegisterGroup and any other registers
-    // already grouped with reg0 or reg1.
-    BacktrackingVirtualRegister* reg0 = &vregs[vreg0];
-    BacktrackingVirtualRegister* reg1 = &vregs[vreg1];
-
-    if (!reg0->isCompatibleVReg(*reg1))
+    // See if bundle0 and bundle1 can be merged together.
+    if (bundle0 == bundle1)
+        return true;
+
+    // Get a representative virtual register from each bundle.
+    VirtualRegister& reg0 = vregs[bundle0->firstRange()->vreg()];
+    VirtualRegister& reg1 = vregs[bundle1->firstRange()->vreg()];
+
+    if (!reg0.isCompatible(reg1))
         return true;
 
     // Registers which might spill to the frame's |this| slot can only be
     // grouped with other such registers. The frame's |this| slot must always
     // hold the |this| value, as required by JitFrame tracing and by the Ion
     // constructor calling convention.
-    if (IsThisSlotDefinition(reg0->def()) || IsThisSlotDefinition(reg1->def())) {
-        if (*reg0->def()->output() != *reg1->def()->output())
+    if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
+        if (*reg0.def()->output() != *reg1.def()->output())
             return true;
     }
 
     // Registers which might spill to the frame's argument slots can only be
     // grouped with other such registers if the frame might access those
     // arguments through a lazy arguments object.
-    if (IsArgumentSlotDefinition(reg0->def()) || IsArgumentSlotDefinition(reg1->def())) {
+    if (IsArgumentSlotDefinition(reg0.def()) || IsArgumentSlotDefinition(reg1.def())) {
         JSScript* script = graph.mir().entryBlock()->info().script();
         if (script && script->argumentsHasVarBinding()) {
-            if (*reg0->def()->output() != *reg1->def()->output())
+            if (*reg0.def()->output() != *reg1.def()->output())
                 return true;
         }
     }
 
-    VirtualRegisterGroup* group0 = reg0->group();
-    VirtualRegisterGroup* group1 = reg1->group();
-
-    if (!group0 && group1)
-        return tryGroupRegisters(vreg1, vreg0);
-
-    if (group0) {
-        if (group1) {
-            if (group0 == group1) {
-                // The registers are already grouped together.
-                return true;
-            }
-            // Try to unify the two distinct groups.
-            for (size_t i = 0; i < group1->registers.length(); i++) {
-                if (!canAddToGroup(group0, &vregs[group1->registers[i]]))
-                    return true;
-            }
-            for (size_t i = 0; i < group1->registers.length(); i++) {
-                uint32_t vreg = group1->registers[i];
-                if (!group0->registers.append(vreg))
-                    return false;
-                vregs[vreg].setGroup(group0);
-            }
+    // Make sure that ranges in the bundles do not overlap.
+    LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(), iter1 = bundle1->rangesBegin();
+    while (iter0 && iter1) {
+        LiveRange* range0 = LiveRange::get(*iter0);
+        LiveRange* range1 = LiveRange::get(*iter1);
+
+        if (range0->from() >= range1->to())
+            iter1++;
+        else if (range1->from() >= range0->to())
+            iter0++;
+        else
             return true;
-        }
-        if (!canAddToGroup(group0, reg1))
-            return true;
-        if (!group0->registers.append(vreg1))
-            return false;
-        reg1->setGroup(group0);
-        return true;
     }
 
-    if (LifetimesOverlap(reg0, reg1))
-        return true;
-
-    VirtualRegisterGroup* group = new(alloc()) VirtualRegisterGroup(alloc());
-    if (!group->registers.append(vreg0) || !group->registers.append(vreg1))
-        return false;
-
-    reg0->setGroup(group);
-    reg1->setGroup(group);
+    // Move all ranges from bundle1 into bundle0.
+    while (LiveRange* range = bundle1->popFirstRange())
+        bundle0->addRange(range);
+
     return true;
 }
 
+static inline LDefinition*
+FindReusingDefinition(LNode* ins, LAllocation* alloc)
+{
+    for (size_t i = 0; i < ins->numDefs(); i++) {
+        LDefinition* def = ins->getDef(i);
+        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(def->getReusedInput()) == alloc)
+            return def;
+    }
+    for (size_t i = 0; i < ins->numTemps(); i++) {
+        LDefinition* def = ins->getTemp(i);
+        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
+            ins->getOperand(def->getReusedInput()) == alloc)
+            return def;
+    }
+    return nullptr;
+}
+
 bool
-BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use)
+BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input)
 {
-    BacktrackingVirtualRegister& reg = vregs[def];
-    BacktrackingVirtualRegister& usedReg = vregs[use];
-
-    // reg is a vreg which reuses its input usedReg for its output physical
-    // register. Try to group reg with usedReg if at all possible, as avoiding
-    // copies before reg's instruction is crucial for the quality of the
-    // generated code (MUST_REUSE_INPUT is used by all arithmetic instructions
-    // on x86/x64).
-
-    if (reg.intervalFor(inputOf(reg.ins()))) {
-        MOZ_ASSERT(reg.isTemp());
-        reg.setMustCopyInput();
+    // def is a vreg which reuses input for its output physical register. Try
+    // to merge ranges for def with those of input if possible, as avoiding
+    // copies before def's instruction is crucial for generated code quality
+    // (MUST_REUSE_INPUT is used for all arithmetic on x86/x64).
+
+    if (def.rangeFor(inputOf(def.ins()))) {
+        MOZ_ASSERT(def.isTemp());
+        def.setMustCopyInput();
         return true;
     }
 
-    if (!usedReg.intervalFor(outputOf(reg.ins()))) {
+    LiveRange* inputRange = input.rangeFor(outputOf(def.ins()));
+    if (!inputRange) {
         // The input is not live after the instruction, either in a safepoint
         // for the instruction or in subsequent code. The input and output
         // can thus be in the same group.
-        return tryGroupRegisters(use, def);
+        return tryMergeBundles(def.firstBundle(), input.firstBundle());
     }
 
     // The input is live afterwards, either in future instructions or in a
     // safepoint for the reusing instruction. This is impossible to satisfy
     // without copying the input.
     //
-    // It may or may not be better to split the interval at the point of the
-    // definition, which may permit grouping. One case where it is definitely
-    // better to split is if the input never has any register uses after the
-    // instruction. Handle this splitting eagerly.
-
-    if (usedReg.numIntervals() != 1 ||
-        (usedReg.def()->isFixed() && !usedReg.def()->output()->isRegister())) {
-        reg.setMustCopyInput();
-        return true;
-    }
-    LiveInterval* interval = usedReg.getInterval(0);
-    LBlock* block = reg.ins()->block();
+    // It may or may not be better to split the input into two bundles at the
+    // point of the definition, which may permit merging. One case where it is
+    // definitely better to split is if the input never has any register uses
+    // after the instruction. Handle this splitting eagerly.
+
+    LBlock* block = def.ins()->block();
 
     // The input's lifetime must end within the same block as the definition,
     // otherwise it could live on in phis elsewhere.
-    if (interval->end() > exitOf(block)) {
-        reg.setMustCopyInput();
+    if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) {
+        def.setMustCopyInput();
+        return true;
+    }
+
+    // If we already split the input for some other register, don't make a
+    // third bundle.
+    if (inputRange->bundle() != input.firstRange()->bundle()) {
+        def.setMustCopyInput();
         return true;
     }
 
-    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
-        if (iter->pos <= inputOf(reg.ins()))
+    // If the input will start out in memory then adding a separate bundle for
+    // memory uses after the def won't help.
+    if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
+        def.setMustCopyInput();
+        return true;
+    }
+
+    // The input cannot have register or reused uses after the definition.
+    for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
+        if (iter->pos <= inputOf(def.ins()))
             continue;
 
         LUse* use = iter->use;
         if (FindReusingDefinition(insData[iter->pos], use)) {
-            reg.setMustCopyInput();
+            def.setMustCopyInput();
             return true;
         }
         if (use->policy() != LUse::ANY && use->policy() != LUse::KEEPALIVE) {
-            reg.setMustCopyInput();
+            def.setMustCopyInput();
             return true;
         }
     }
 
-    LiveInterval* preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        const LiveInterval::Range* range = interval->getRange(i);
-        MOZ_ASSERT(range->from <= inputOf(reg.ins()));
-
-        CodePosition to = Min(range->to, outputOf(reg.ins()));
-        if (!preInterval->addRange(range->from, to))
-            return false;
-    }
-
-    // The new interval starts at reg's input position, which means it overlaps
-    // with the old interval at one position. This is what we want, because we
+    LiveRange* preRange = LiveRange::New(alloc(), input.vreg(),
+                                         inputRange->from(), outputOf(def.ins()));
+    if (!preRange)
+        return false;
+
+    // The new range starts at the def's input position, which means it overlaps
+    // with the old range at one position. This is what we want, because we
     // need to copy the input before the instruction.
-    LiveInterval* postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-    if (!postInterval->addRange(inputOf(reg.ins()), interval->end()))
+    LiveRange* postRange = LiveRange::New(alloc(), input.vreg(),
+                                          inputOf(def.ins()), inputRange->to());
+    if (!postRange)
         return false;
 
-    LiveIntervalVector newIntervals;
-    if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
-        return false;
-
-    distributeUses(interval, newIntervals);
+    inputRange->distributeUses(preRange);
+    inputRange->distributeUses(postRange);
+    MOZ_ASSERT(!inputRange->hasUses());
 
     JitSpew(JitSpew_RegAlloc, "  splitting reused input at %u to try to help grouping",
-            inputOf(reg.ins()));
-
-    if (!split(interval, newIntervals))
+            inputOf(def.ins()));
+
+    LiveBundle* firstBundle = inputRange->bundle();
+    input.removeRange(inputRange);
+    input.addRange(preRange);
+    input.addRange(postRange);
+
+    firstBundle->removeRange(inputRange);
+    firstBundle->addRange(preRange);
+
+    // The new range goes in a separate bundle, where it will be spilled during
+    // allocation.
+    LiveBundle* secondBundle = LiveBundle::New(alloc(), nullptr, nullptr);
+    if (!secondBundle)
         return false;
-
-    MOZ_ASSERT(usedReg.numIntervals() == 2);
-
-    usedReg.setCanonicalSpillExclude(inputOf(reg.ins()));
-
-    return tryGroupRegisters(use, def);
+    secondBundle->addRange(postRange);
+
+    return tryMergeBundles(def.firstBundle(), input.firstBundle());
 }
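
The eager split above cuts the reused input's range into two pieces that overlap by exactly one position, so a copy of the input can be placed at the reusing instruction. A minimal standalone sketch of that arithmetic, using a hypothetical Range type and a position encoding where inputOf(ins) == p and outputOf(ins) == p + 1 (not Ion's LiveRange/CodePosition classes):

#include <cstdint>
#include <utility>

struct Range { uint32_t from, to; };  // half-open [from, to)

// Split an input range around a reusing definition at position p: the
// pre-range runs up to the def's output position, the post-range starts at
// the def's input position, so the two overlap at exactly the input
// position, which is where the copy will be inserted.
std::pair<Range, Range> splitAtReusingDef(Range input, uint32_t p) {
    Range pre  = { input.from, p + 1 };   // up to outputOf(def)
    Range post = { p,          input.to };  // from inputOf(def) onwards
    return { pre, post };
}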
 
 bool
-BacktrackingAllocator::groupAndQueueRegisters()
+BacktrackingAllocator::mergeAndQueueRegisters()
 {
-    // If there is an OSR block, group parameters in that block with the
+    MOZ_ASSERT(!vregs[0u].hasRanges());
+
+    // Create a bundle for each register containing all its ranges.
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister& reg = vregs[i];
+        if (!reg.hasRanges())
+            continue;
+
+        LiveBundle* bundle = LiveBundle::New(alloc(), nullptr, nullptr);
+        if (!bundle)
+            return false;
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            bundle->addRange(range);
+        }
+    }
+
+    // If there is an OSR block, merge parameters in that block with the
     // corresponding parameters in the initial block.
     if (MBasicBlock* osr = graph.mir().osrBlock()) {
-        size_t originalVreg = 1;
+        size_t original = 1;
         for (LInstructionIterator iter = osr->lir()->begin(); iter != osr->lir()->end(); iter++) {
             if (iter->isParameter()) {
                 for (size_t i = 0; i < iter->numDefs(); i++) {
                     DebugOnly<bool> found = false;
-                    uint32_t paramVreg = iter->getDef(i)->virtualRegister();
-                    for (; originalVreg < paramVreg; originalVreg++) {
-                        if (*vregs[originalVreg].def()->output() == *iter->getDef(i)->output()) {
-                            MOZ_ASSERT(vregs[originalVreg].ins()->isParameter());
-                            if (!tryGroupRegisters(originalVreg, paramVreg))
+                    VirtualRegister& paramVreg = vreg(iter->getDef(i));
+                    for (; original < paramVreg.vreg(); original++) {
+                        VirtualRegister& originalVreg = vregs[original];
+                        if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
+                            MOZ_ASSERT(originalVreg.ins()->isParameter());
+                            if (!tryMergeBundles(originalVreg.firstBundle(), paramVreg.firstBundle()))
                                 return false;
-                            MOZ_ASSERT(vregs[originalVreg].group() == vregs[paramVreg].group());
                             found = true;
                             break;
                         }
                     }
                     MOZ_ASSERT(found);
                 }
             }
         }
     }
 
-    // Try to group registers with their reused inputs.
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
+    // Try to merge registers with their reused inputs.
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        BacktrackingVirtualRegister& reg = vregs[i];
-        if (!reg.numIntervals())
+        VirtualRegister& reg = vregs[i];
+        if (!reg.hasRanges())
             continue;
 
         if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
             LUse* use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
-            if (!tryGroupReusedRegister(i, use->virtualRegister()))
+            if (!tryMergeReusedRegister(reg, vreg(use)))
                 return false;
         }
     }
 
-    // Try to group phis with their inputs.
+    // Try to merge phis with their inputs.
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock* block = graph.getBlock(i);
         for (size_t j = 0; j < block->numPhis(); j++) {
             LPhi* phi = block->getPhi(j);
-            uint32_t output = phi->getDef(0)->virtualRegister();
+            VirtualRegister& outputVreg = vreg(phi->getDef(0));
             for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
-                uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
-                if (!tryGroupRegisters(input, output))
+                VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
+                if (!tryMergeBundles(inputVreg.firstBundle(), outputVreg.firstBundle()))
                     return false;
             }
         }
     }
 
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
+    // Add all bundles to the allocation queue, and create spill sets for them.
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        if (mir->shouldCancel("Backtracking Enqueue Registers"))
-            return false;
-
-        BacktrackingVirtualRegister& reg = vregs[i];
-        MOZ_ASSERT(reg.numIntervals() <= 2);
-        MOZ_ASSERT(!reg.canonicalSpill());
-
-        if (!reg.numIntervals())
-            continue;
-
-        // Eagerly set the canonical spill slot for registers which are fixed
-        // for that slot, and reuse it for other registers in the group.
-        LDefinition* def = reg.def();
-        if (def->policy() == LDefinition::FIXED && !def->output()->isRegister()) {
-            MOZ_ASSERT(!def->output()->isStackSlot());
-            reg.setCanonicalSpill(*def->output());
-            if (reg.group() && reg.group()->spill.isUse())
-                reg.group()->spill = *def->output();
-        }
-
-        // Place all intervals for this register on the allocation queue.
-        // During initial queueing use single queue items for groups of
-        // registers, so that they will be allocated together and reduce the
-        // risk of unnecessary conflicts. This is in keeping with the idea that
-        // register groups are effectively single registers whose value changes
-        // during execution. If any intervals in the group are evicted later
-        // then they will be reallocated individually.
-        size_t start = 0;
-        if (VirtualRegisterGroup* group = reg.group()) {
-            if (i == group->canonicalReg()) {
-                size_t priority = computePriority(group);
-                if (!allocationQueue.insert(QueueItem(group, priority)))
+        VirtualRegister& reg = vregs[i];
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            LiveBundle* bundle = range->bundle();
+            if (range == bundle->firstRange()) {
+                SpillSet* spill = SpillSet::New(alloc());
+                if (!spill)
                     return false;
-            }
-            start++;
-        }
-        for (; start < reg.numIntervals(); start++) {
-            LiveInterval* interval = reg.getInterval(start);
-            if (interval->numRanges() > 0) {
-                size_t priority = computePriority(interval);
-                if (!allocationQueue.insert(QueueItem(interval, priority)))
+                bundle->setSpillSet(spill);
+
+                size_t priority = computePriority(bundle);
+                if (!allocationQueue.insert(QueueItem(bundle, priority)))
                     return false;
             }
         }
     }
 
     return true;
 }
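
mergeAndQueueRegisters finishes by inserting every bundle into a priority queue so that higher-priority, longer-lived bundles are allocated first. A simplified illustration using std::priority_queue; the Bundle type and the length-based priority below are stand-ins for this sketch, not the allocator's real definitions:

#include <cstddef>
#include <queue>

struct Bundle { size_t totalLength; };  // stand-in for a LiveBundle

struct QueueItem {
    Bundle* bundle;
    size_t priority;
    bool operator<(const QueueItem& other) const { return priority < other.priority; }
};

size_t computePriority(const Bundle* b) { return b->totalLength; }

int main() {
    std::priority_queue<QueueItem> allocationQueue;
    Bundle longLived{40}, shortLived{3};
    allocationQueue.push(QueueItem{&longLived, computePriority(&longLived)});
    allocationQueue.push(QueueItem{&shortLived, computePriority(&shortLived)});
    // The longer-lived bundle is popped and processed first.
    while (!allocationQueue.empty())
        allocationQueue.pop();
}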
 
 static const size_t MAX_ATTEMPTS = 2;
 
 bool
-BacktrackingAllocator::tryAllocateFixed(LiveInterval* interval, bool* success,
-                                        bool* pfixed, LiveIntervalVector& conflicting)
+BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
+                                        bool* success, bool* pfixed,
+                                        LiveBundleVector& conflicting)
 {
-    // Spill intervals which are required to be in a certain stack slot.
-    if (!interval->requirement()->allocation().isRegister()) {
+    // Spill bundles which are required to be in a certain stack slot.
+    if (!requirement.allocation().isRegister()) {
         JitSpew(JitSpew_RegAlloc, "  stack allocation requirement");
-        interval->setAllocation(interval->requirement()->allocation());
+        bundle->setAllocation(requirement.allocation());
         *success = true;
         return true;
     }
 
-    AnyRegister reg = interval->requirement()->allocation().toRegister();
-    return tryAllocateRegister(registers[reg.code()], interval, success, pfixed, conflicting);
+    AnyRegister reg = requirement.allocation().toRegister();
+    return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting);
 }
 
 bool
-BacktrackingAllocator::tryAllocateNonFixed(LiveInterval* interval, bool* success,
-                                           bool* pfixed, LiveIntervalVector& conflicting)
+BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle,
+                                           Requirement requirement, Requirement hint,
+                                           bool* success, bool* pfixed,
+                                           LiveBundleVector& conflicting)
 {
-    // If we want, but do not require an interval to be in a specific
-    // register, only look at that register for allocating and evict
-    // or spill if it is not available. Picking a separate register may
-    // be even worse than spilling, as it will still necessitate moves
-    // and will tie up more registers than if we spilled.
-    if (interval->hint()->kind() == Requirement::FIXED) {
-        AnyRegister reg = interval->hint()->allocation().toRegister();
-        if (!tryAllocateRegister(registers[reg.code()], interval, success, pfixed, conflicting))
+    // If we want, but do not require, a bundle to be in a specific register,
+    // only look at that register for allocating and evict or spill if it is
+    // not available. Picking a separate register may be even worse than
+    // spilling, as it will still necessitate moves and will tie up more
+    // registers than if we spilled.
+    if (hint.kind() == Requirement::FIXED) {
+        AnyRegister reg = hint.allocation().toRegister();
+        if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting))
             return false;
         if (*success)
             return true;
     }
 
-    // Spill intervals which have no hint or register requirement.
-    if (interval->requirement()->kind() == Requirement::NONE &&
-        interval->hint()->kind() != Requirement::REGISTER)
-    {
-        spill(interval);
+    // Spill bundles which have no hint or register requirement.
+    if (requirement.kind() == Requirement::NONE && hint.kind() != Requirement::REGISTER) {
+        if (!spill(bundle))
+            return false;
         *success = true;
         return true;
     }
 
-    if (conflicting.empty() || minimalInterval(interval)) {
-        // Search for any available register which the interval can be
+    if (conflicting.empty() || minimalBundle(bundle)) {
+        // Search for any available register which the bundle can be
         // allocated to.
         for (size_t i = 0; i < AnyRegister::Total; i++) {
-            if (!tryAllocateRegister(registers[i], interval, success, pfixed, conflicting))
+            if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting))
                 return false;
             if (*success)
                 return true;
         }
     }
 
-    // Spill intervals which have no register requirement if they didn't get
+    // Spill bundles which have no register requirement if they didn't get
     // allocated.
-    if (interval->requirement()->kind() == Requirement::NONE) {
-        spill(interval);
+    if (requirement.kind() == Requirement::NONE) {
+        if (!spill(bundle))
+            return false;
         *success = true;
         return true;
     }
 
-    // We failed to allocate this interval.
+    // We failed to allocate this bundle.
     MOZ_ASSERT(!*success);
     return true;
 }
 
 bool
-BacktrackingAllocator::processInterval(LiveInterval* interval)
+BacktrackingAllocator::processBundle(LiveBundle* bundle)
 {
     if (JitSpewEnabled(JitSpew_RegAlloc)) {
         JitSpew(JitSpew_RegAlloc, "Allocating %s [priority %lu] [weight %lu]",
-                interval->toString(), computePriority(interval), computeSpillWeight(interval));
+                bundle->toString(), computePriority(bundle), computeSpillWeight(bundle));
     }
 
-    // An interval can be processed by doing any of the following:
+    // A bundle can be processed by doing any of the following:
     //
-    // - Assigning the interval a register. The interval cannot overlap any
-    //   other interval allocated for that physical register.
+    // - Assigning the bundle a register. The bundle cannot overlap any other
+    //   bundle allocated for that physical register.
     //
-    // - Spilling the interval, provided it has no register uses.
+    // - Spilling the bundle, provided it has no register uses.
     //
-    // - Splitting the interval into two or more intervals which cover the
-    //   original one. The new intervals are placed back onto the priority
-    //   queue for later processing.
+    // - Splitting the bundle into two or more bundles which cover the original
+    //   one. The new bundles are placed back onto the priority queue for later
+    //   processing.
     //
-    // - Evicting one or more existing allocated intervals, and then doing one
-    //   of the above operations. Evicted intervals are placed back on the
-    //   priority queue. Any evicted intervals must have a lower spill weight
-    //   than the interval being processed.
+    // - Evicting one or more existing allocated bundles, and then doing one
+    //   of the above operations. Evicted bundles are placed back on the
+    //   priority queue. Any evicted bundles must have a lower spill weight
+    //   than the bundle being processed.
     //
     // As long as this structure is followed, termination is guaranteed.
-    // In general, we want to minimize the amount of interval splitting
-    // (which generally necessitates spills), so allocate longer lived, lower
-    // weight intervals first and evict and split them later if they prevent
-    // allocation for higher weight intervals.
-
-    bool canAllocate = setIntervalRequirement(interval);
+    // In general, we want to minimize the amount of bundle splitting (which
+    // generally necessitates spills), so allocate longer lived, lower weight
+    // bundles first and evict and split them later if they prevent allocation
+    // for higher weight bundles.
+
+    Requirement requirement, hint;
+    bool canAllocate = computeRequirement(bundle, &requirement, &hint);
 
     bool fixed;
-    LiveIntervalVector conflicting;
+    LiveBundleVector conflicting;
     for (size_t attempt = 0;; attempt++) {
         if (canAllocate) {
             bool success = false;
             fixed = false;
             conflicting.clear();
 
-            // Ok, let's try allocating for this interval.
-            if (interval->requirement()->kind() == Requirement::FIXED) {
-                if (!tryAllocateFixed(interval, &success, &fixed, conflicting))
+            // Ok, let's try allocating for this bundle.
+            if (requirement.kind() == Requirement::FIXED) {
+                if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting))
                     return false;
             } else {
-                if (!tryAllocateNonFixed(interval, &success, &fixed, conflicting))
+                if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting))
                     return false;
             }
 
             // If that worked, we're done!
             if (success)
                 return true;
 
-            // If that didn't work, but we have one or more non-fixed intervals
+            // If that didn't work, but we have one or more non-fixed bundles
             // known to be conflicting, maybe we can evict them and try again.
             if (attempt < MAX_ATTEMPTS &&
                 !fixed &&
                 !conflicting.empty() &&
-                maximumSpillWeight(conflicting) < computeSpillWeight(interval))
+                maximumSpillWeight(conflicting) < computeSpillWeight(bundle))
             {
                 for (size_t i = 0; i < conflicting.length(); i++) {
-                    if (!evictInterval(conflicting[i]))
+                    if (!evictBundle(conflicting[i]))
                         return false;
                 }
                 continue;
             }
         }
 
-        // A minimal interval cannot be split any further. If we try to split
-        // it at this point we will just end up with the same interval and will
-        // enter an infinite loop. Weights and the initial live intervals must
-        // be constructed so that any minimal interval is allocatable.
-        MOZ_ASSERT(!minimalInterval(interval));
-
-        LiveInterval* conflict = conflicting.empty() ? nullptr : conflicting[0];
-        return chooseIntervalSplit(interval, canAllocate && fixed, conflict);
+    // A minimal bundle cannot be split any further. If we try to split it
+    // at this point we will just end up with the same bundle and will
+        // enter an infinite loop. Weights and the initial live ranges must
+        // be constructed so that any minimal bundle is allocatable.
+        MOZ_ASSERT(!minimalBundle(bundle));
+
+        LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0];
+        return chooseBundleSplit(bundle, canAllocate && fixed, conflict);
     }
 }
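
The eviction guard inside processBundle only fires when every conflicting bundle has a strictly lower spill weight than the bundle being allocated; otherwise the bundle is split instead, which is what guarantees termination. A small sketch of that check under simplified types (a spillWeight field stored directly on the bundle is an assumption made here for illustration):

#include <algorithm>
#include <cstddef>
#include <vector>

struct Bundle { size_t spillWeight; };

size_t maximumSpillWeight(const std::vector<Bundle*>& bundles) {
    size_t max = 0;
    for (const Bundle* b : bundles)
        max = std::max(max, b->spillWeight);
    return max;
}

// Evict only if we still have attempts left, the conflicts are known and
// non-fixed, and all of them are cheaper to move than the current bundle.
bool shouldEvict(const Bundle* current, const std::vector<Bundle*>& conflicting,
                 bool fixed, size_t attempt, size_t maxAttempts) {
    return attempt < maxAttempts && !fixed && !conflicting.empty() &&
           maximumSpillWeight(conflicting) < current->spillWeight;
}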
 
 bool
-BacktrackingAllocator::processGroup(VirtualRegisterGroup* group)
+BacktrackingAllocator::computeRequirement(LiveBundle* bundle,
+                                          Requirement* requirement, Requirement* hint)
 {
-    if (JitSpewEnabled(JitSpew_RegAlloc)) {
-        JitSpew(JitSpew_RegAlloc, "Allocating group v%u [priority %lu] [weight %lu]",
-                group->registers[0], computePriority(group), computeSpillWeight(group));
-    }
-
-    bool fixed;
-    LiveInterval* conflict;
-    for (size_t attempt = 0;; attempt++) {
-        // Search for any available register which the group can be allocated to.
-        fixed = false;
-        conflict = nullptr;
-        for (size_t i = 0; i < AnyRegister::Total; i++) {
-            bool success;
-            if (!tryAllocateGroupRegister(registers[i], group, &success, &fixed, &conflict))
-                return false;
-            if (success) {
-                conflict = nullptr;
-                break;
+    // Set any requirement or hint on bundle according to its definition and
+    // uses. Return false if there are conflicting requirements which will
+    // require the bundle to be split.
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        VirtualRegister& reg = vregs[range->vreg()];
+
+        if (range->hasDefinition()) {
+            // Deal with any definition constraints/hints.
+            LDefinition::Policy policy = reg.def()->policy();
+            if (policy == LDefinition::FIXED) {
+                // Fixed policies get a FIXED requirement.
+                JitSpew(JitSpew_RegAlloc, "  Requirement %s, fixed by definition",
+                        reg.def()->output()->toString());
+                if (!requirement->merge(Requirement(*reg.def()->output())))
+                    return false;
+            } else if (reg.ins()->isPhi()) {
+                // Phis don't have any requirements, but they should prefer their
+                // input allocations. This is captured by the bundle merging above.
+            } else {
+                // Non-phis get a REGISTER requirement.
+                if (!requirement->merge(Requirement(Requirement::REGISTER)))
+                    return false;
             }
         }
 
-        if (attempt < MAX_ATTEMPTS &&
-            !fixed &&
-            conflict &&
-            conflict->hasVreg() &&
-            computeSpillWeight(conflict) < computeSpillWeight(group))
-        {
-            if (!evictInterval(conflict))
-                return false;
-            continue;
-        }
-
-        for (size_t i = 0; i < group->registers.length(); i++) {
-            VirtualRegister& reg = vregs[group->registers[i]];
-            MOZ_ASSERT(reg.numIntervals() <= 2);
-            if (!processInterval(reg.getInterval(0)))
-                return false;
-        }
-
-        return true;
-    }
-}
-
-bool
-BacktrackingAllocator::setIntervalRequirement(LiveInterval* interval)
-{
-    // Set any requirement or hint on interval according to its definition and
-    // uses. Return false if there are conflicting requirements which will
-    // require the interval to be split.
-    interval->setHint(Requirement());
-    interval->setRequirement(Requirement());
-
-    BacktrackingVirtualRegister* reg = &vregs[interval->vreg()];
-
-    // Set a hint if another interval in the same group is in a register.
-    if (VirtualRegisterGroup* group = reg->group()) {
-        if (group->allocation.isRegister()) {
-            if (JitSpewEnabled(JitSpew_RegAlloc)) {
-                JitSpew(JitSpew_RegAlloc, "  Hint %s, used by group allocation",
-                        group->allocation.toString());
-            }
-            interval->setHint(Requirement(group->allocation));
-        }
-    }
-
-    if (interval->index() == 0) {
-        // The first interval is the definition, so deal with any definition
-        // constraints/hints.
-
-        LDefinition::Policy policy = reg->def()->policy();
-        if (policy == LDefinition::FIXED) {
-            // Fixed policies get a FIXED requirement.
-            if (JitSpewEnabled(JitSpew_RegAlloc)) {
-                JitSpew(JitSpew_RegAlloc, "  Requirement %s, fixed by definition",
-                        reg->def()->output()->toString());
-            }
-            interval->setRequirement(Requirement(*reg->def()->output()));
-        } else if (reg->ins()->isPhi()) {
-            // Phis don't have any requirements, but they should prefer their
-            // input allocations. This is captured by the group hints above.
-        } else {
-            // Non-phis get a REGISTER requirement.
-            interval->setRequirement(Requirement(Requirement::REGISTER));
-        }
-    }
-
-    // Search uses for requirements.
-    for (UsePositionIterator iter = interval->usesBegin();
-         iter != interval->usesEnd();
-         iter++)
-    {
-        LUse::Policy policy = iter->use->policy();
-        if (policy == LUse::FIXED) {
-            AnyRegister required = GetFixedRegister(reg->def(), iter->use);
-
-            if (JitSpewEnabled(JitSpew_RegAlloc)) {
+        // Search uses for requirements.
+        for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+            LUse::Policy policy = iter->use->policy();
+            if (policy == LUse::FIXED) {
+                AnyRegister required = GetFixedRegister(reg.def(), iter->use);
+
                 JitSpew(JitSpew_RegAlloc, "  Requirement %s, due to use at %u",
                         required.name(), iter->pos.bits());
+
+                // If there are multiple fixed registers which the bundle is
+                // required to use, fail. The bundle will need to be split before
+                // it can be allocated.
+                if (!requirement->merge(Requirement(LAllocation(required))))
+                    return false;
+            } else if (policy == LUse::REGISTER) {
+                if (!requirement->merge(Requirement(Requirement::REGISTER)))
+                    return false;
+            } else if (policy == LUse::ANY) {
+                // ANY differs from KEEPALIVE by actively preferring a register.
+                hint->merge(Requirement(Requirement::REGISTER));
             }
-
-            // If there are multiple fixed registers which the interval is
-            // required to use, fail. The interval will need to be split before
-            // it can be allocated.
-            if (!interval->addRequirement(Requirement(LAllocation(required))))
-                return false;
-        } else if (policy == LUse::REGISTER) {
-            if (!interval->addRequirement(Requirement(Requirement::REGISTER)))
-                return false;
-        } else if (policy == LUse::ANY) {
-            // ANY differs from KEEPALIVE by actively preferring a register.
-            interval->addHint(Requirement(Requirement::REGISTER));
         }
     }
 
     return true;
 }
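
computeRequirement fails when a bundle is constrained to two different fixed registers, forcing the bundle to be split before it can be allocated. A toy Requirement model whose merge rejects conflicting FIXED registers (a simplified stand-in, not Ion's Requirement class):

#include <cstdint>

struct Req {
    enum Kind { NONE, REGISTER, FIXED } kind = NONE;
    uint32_t reg = 0;  // only meaningful when kind == FIXED

    // Combine another requirement into this one; return false on conflict.
    bool merge(const Req& other) {
        if (other.kind == NONE)
            return true;
        if (kind == NONE) {
            *this = other;
            return true;
        }
        if (kind == FIXED && other.kind == FIXED)
            return reg == other.reg;  // two different fixed registers conflict
        if (other.kind == FIXED)
            *this = other;            // FIXED also satisfies REGISTER
        return true;
    }
};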
 
 bool
-BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegisterGroup* group,
-                                                bool* psuccess, bool* pfixed, LiveInterval** pconflicting)
-{
-    *psuccess = false;
-
-    if (!r.allocatable)
-        return true;
-
-    if (!vregs[group->registers[0]].isCompatibleReg(r.reg))
-        return true;
-
-    bool allocatable = true;
-    LiveInterval* conflicting = nullptr;
-
-    for (size_t i = 0; i < group->registers.length(); i++) {
-        VirtualRegister& reg = vregs[group->registers[i]];
-        MOZ_ASSERT(reg.numIntervals() <= 2);
-        LiveInterval* interval = reg.getInterval(0);
-
-        for (size_t j = 0; j < interval->numRanges(); j++) {
-            AllocatedRange range(interval, interval->getRange(j)), existing;
-            if (r.allocations.contains(range, &existing)) {
-                if (conflicting) {
-                    if (conflicting != existing.interval)
-                        return true;
-                } else {
-                    conflicting = existing.interval;
-                }
-                allocatable = false;
-            }
-        }
-    }
-
-    if (!allocatable) {
-        MOZ_ASSERT(conflicting);
-        if (!*pconflicting || computeSpillWeight(conflicting) < computeSpillWeight(*pconflicting))
-            *pconflicting = conflicting;
-        if (!conflicting->hasVreg())
-            *pfixed = true;
-        return true;
-    }
-
-    *psuccess = true;
-
-    group->allocation = LAllocation(r.reg);
-    return true;
-}
-
-bool
-BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveInterval* interval,
-                                           bool* success, bool* pfixed, LiveIntervalVector& conflicting)
+BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
+                                           bool* success, bool* pfixed, LiveBundleVector& conflicting)
 {
     *success = false;
 
     if (!r.allocatable)
         return true;
 
-    BacktrackingVirtualRegister* reg = &vregs[interval->vreg()];
-    if (!reg->isCompatibleReg(r.reg))
-        return true;
-
-    MOZ_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED,
-                  interval->requirement()->allocation() == LAllocation(r.reg));
-
-    LiveIntervalVector aliasedConflicting;
-
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        AllocatedRange range(interval, interval->getRange(i)), existing;
+    LiveBundleVector aliasedConflicting;
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        VirtualRegister& reg = vregs[range->vreg()];
+
+        if (!reg.isCompatible(r.reg))
+            return true;
+
         for (size_t a = 0; a < r.reg.numAliased(); a++) {
             PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
+            LiveRange* existing;
             if (!rAlias.allocations.contains(range, &existing))
                 continue;
-            if (existing.interval->hasVreg()) {
-                MOZ_ASSERT(existing.interval->getAllocation()->toRegister() == rAlias.reg);
+            if (existing->hasVreg()) {
+                MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
                 bool duplicate = false;
                 for (size_t i = 0; i < aliasedConflicting.length(); i++) {
-                    if (aliasedConflicting[i] == existing.interval) {
+                    if (aliasedConflicting[i] == existing->bundle()) {
                         duplicate = true;
                         break;
                     }
                 }
-                if (!duplicate && !aliasedConflicting.append(existing.interval))
+                if (!duplicate && !aliasedConflicting.append(existing->bundle()))
                     return false;
             } else {
-                if (JitSpewEnabled(JitSpew_RegAlloc)) {
-                    JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
-                            rAlias.reg.name(), existing.range->toString());
-                }
+                JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
+                        rAlias.reg.name(), existing->toString());
                 *pfixed = true;
                 return true;
             }
         }
     }
 
     if (!aliasedConflicting.empty()) {
+        // One or more aliased registers are allocated to another bundle
+        // One or more aliased registers is allocated to another bundle
         // overlapping this one. Keep track of the conflicting set, and in the
         // case of multiple conflicting sets keep track of the set with the
         // lowest maximum spill weight.
 
         if (JitSpewEnabled(JitSpew_RegAlloc)) {
             if (aliasedConflicting.length() == 1) {
-                LiveInterval* existing = aliasedConflicting[0];
-                JitSpew(JitSpew_RegAlloc, "  %s collides with v%u[%u] %s [weight %lu]",
-                        r.reg.name(), existing->vreg(), existing->index(),
-                        existing->rangesToString(), computeSpillWeight(existing));
+                LiveBundle* existing = aliasedConflicting[0];
+                JitSpew(JitSpew_RegAlloc, "  %s collides with %s [weight %lu]",
+                        r.reg.name(), existing->toString(), computeSpillWeight(existing));
             } else {
                 JitSpew(JitSpew_RegAlloc, "  %s collides with the following", r.reg.name());
                 for (size_t i = 0; i < aliasedConflicting.length(); i++) {
-                    LiveInterval* existing = aliasedConflicting[i];
-                    JitSpew(JitSpew_RegAlloc, "      v%u[%u] %s [weight %lu]",
-                            existing->vreg(), existing->index(),
-                            existing->rangesToString(), computeSpillWeight(existing));
+                    LiveBundle* existing = aliasedConflicting[i];
+                    JitSpew(JitSpew_RegAlloc, "      %s [weight %lu]",
+                            existing->toString(), computeSpillWeight(existing));
                 }
             }
         }
 
         if (conflicting.empty()) {
             if (!conflicting.appendAll(aliasedConflicting))
                 return false;
         } else {
@@ -866,459 +1416,360 @@ BacktrackingAllocator::tryAllocateRegist
                     return false;
             }
         }
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  allocated to %s", r.reg.name());
 
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        AllocatedRange range(interval, interval->getRange(i));
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
         if (!r.allocations.insert(range))
             return false;
     }
 
-    // Set any register hint for allocating other intervals in the same group.
-    if (VirtualRegisterGroup* group = reg->group()) {
-        if (!group->allocation.isRegister())
-            group->allocation = LAllocation(r.reg);
-    }
-
-    interval->setAllocation(LAllocation(r.reg));
+    bundle->setAllocation(LAllocation(r.reg));
     *success = true;
     return true;
 }
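
tryAllocateRegister asks each aliased physical register's allocation set whether any already-assigned range overlaps a range of the candidate bundle. A compact sketch of such a contains/insert interface over a map sorted by start position (Ion's LiveRangeSet is a different data structure; this is only illustrative):

#include <cstdint>
#include <map>

struct Range { uint32_t from, to; };  // half-open [from, to)

class AllocationSet {
    std::map<uint32_t, Range> byStart_;  // non-overlapping ranges keyed by start

  public:
    // If a stored range overlaps |r|, copy it into |existing| and return true.
    bool contains(const Range& r, Range* existing) const {
        auto it = byStart_.lower_bound(r.from);
        if (it != byStart_.end() && it->second.from < r.to) {
            *existing = it->second;
            return true;
        }
        if (it != byStart_.begin()) {
            --it;
            if (it->second.to > r.from) {
                *existing = it->second;
                return true;
            }
        }
        return false;
    }

    void insert(const Range& r) { byStart_[r.from] = r; }
};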
 
 bool
-BacktrackingAllocator::evictInterval(LiveInterval* interval)
+BacktrackingAllocator::evictBundle(LiveBundle* bundle)
 {
     if (JitSpewEnabled(JitSpew_RegAlloc)) {
         JitSpew(JitSpew_RegAlloc, "  Evicting %s [priority %lu] [weight %lu]",
-                interval->toString(), computePriority(interval), computeSpillWeight(interval));
+                bundle->toString(), computePriority(bundle), computeSpillWeight(bundle));
     }
 
-    MOZ_ASSERT(interval->getAllocation()->isRegister());
-
-    AnyRegister reg(interval->getAllocation()->toRegister());
+    AnyRegister reg(bundle->allocation().toRegister());
     PhysicalRegister& physical = registers[reg.code()];
     MOZ_ASSERT(physical.reg == reg && physical.allocatable);
 
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        AllocatedRange range(interval, interval->getRange(i));
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
         physical.allocations.remove(range);
     }
 
-    interval->setAllocation(LAllocation());
-
-    size_t priority = computePriority(interval);
-    return allocationQueue.insert(QueueItem(interval, priority));
-}
-
-void
-BacktrackingAllocator::distributeUses(LiveInterval* interval,
-                                      const LiveIntervalVector& newIntervals)
-{
-    MOZ_ASSERT(newIntervals.length() >= 2);
-
-    // Simple redistribution of uses from an old interval to a set of new
-    // intervals. Intervals are permitted to overlap, in which case this will
-    // assign uses in the overlapping section to the interval with the latest
-    // start position.
-    for (UsePositionIterator iter(interval->usesBegin());
-         iter != interval->usesEnd();
-         iter++)
-    {
-        CodePosition pos = iter->pos;
-        LiveInterval* addInterval = nullptr;
-        for (size_t i = 0; i < newIntervals.length(); i++) {
-            LiveInterval* newInterval = newIntervals[i];
-            if (newInterval->covers(pos)) {
-                if (!addInterval || newInterval->start() < addInterval->start())
-                    addInterval = newInterval;
-            }
-        }
-        addInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-    }
+    bundle->setAllocation(LAllocation());
+
+    size_t priority = computePriority(bundle);
+    return allocationQueue.insert(QueueItem(bundle, priority));
 }
 
 bool
-BacktrackingAllocator::split(LiveInterval* interval,
-                             const LiveIntervalVector& newIntervals)
+BacktrackingAllocator::splitAndRequeueBundles(LiveBundle* bundle,
+                                              const LiveBundleVector& newBundles)
 {
     if (JitSpewEnabled(JitSpew_RegAlloc)) {
-        JitSpew(JitSpew_RegAlloc, "    splitting interval %s into:", interval->toString());
-        for (size_t i = 0; i < newIntervals.length(); i++) {
-            JitSpew(JitSpew_RegAlloc, "      %s", newIntervals[i]->toString());
-            MOZ_ASSERT(newIntervals[i]->start() >= interval->start());
-            MOZ_ASSERT(newIntervals[i]->end() <= interval->end());
+        JitSpew(JitSpew_RegAlloc, "    splitting bundle %s into:", bundle->toString());
+        for (size_t i = 0; i < newBundles.length(); i++)
+            JitSpew(JitSpew_RegAlloc, "      %s", newBundles[i]->toString());
+    }
+
+    // Remove all ranges in the old bundle from their register's list.
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        vregs[range->vreg()].removeRange(range);
+    }
+
+    // Add all ranges in the new bundles to their register's list.
+    for (size_t i = 0; i < newBundles.length(); i++) {
+        LiveBundle* newBundle = newBundles[i];
+        for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            vregs[range->vreg()].addRange(range);
         }
     }
 
-    MOZ_ASSERT(newIntervals.length() >= 2);
-
-    // Find the earliest interval in the new list.
-    LiveInterval* first = newIntervals[0];
-    for (size_t i = 1; i < newIntervals.length(); i++) {
-        if (newIntervals[i]->start() < first->start())
-            first = newIntervals[i];
-    }
-
-    // Replace the old interval in the virtual register's state with the new intervals.
-    VirtualRegister* reg = &vregs[interval->vreg()];
-    reg->replaceInterval(interval, first);
-    for (size_t i = 0; i < newIntervals.length(); i++) {
-        if (newIntervals[i] != first && !reg->addInterval(newIntervals[i]))
+    // Queue the new bundles for register assignment.
+    for (size_t i = 0; i < newBundles.length(); i++) {
+        LiveBundle* newBundle = newBundles[i];
+        size_t priority = computePriority(newBundle);
+        if (!allocationQueue.insert(QueueItem(newBundle, priority)))
             return false;
     }
 
     return true;
 }
 
-bool BacktrackingAllocator::requeueIntervals(const LiveIntervalVector& newIntervals)
-{
-    // Queue the new intervals for register assignment.
-    for (size_t i = 0; i < newIntervals.length(); i++) {
-        LiveInterval* newInterval = newIntervals[i];
-        size_t priority = computePriority(newInterval);
-        if (!allocationQueue.insert(QueueItem(newInterval, priority)))
-            return false;
-    }
-    return true;
-}
-
-void
-BacktrackingAllocator::spill(LiveInterval* interval)
+bool
+BacktrackingAllocator::spill(LiveBundle* bundle)
 {
-    JitSpew(JitSpew_RegAlloc, "  Spilling interval");
-
-    MOZ_ASSERT(interval->requirement()->kind() == Requirement::NONE);
-    MOZ_ASSERT(!interval->getAllocation()->isStackSlot());
-
-    // We can't spill bogus intervals.
-    MOZ_ASSERT(interval->hasVreg());
-
-    BacktrackingVirtualRegister* reg = &vregs[interval->vreg()];
-
-    if (LiveInterval* spillInterval = interval->spillInterval()) {
-        JitSpew(JitSpew_RegAlloc, "    Spilling to existing spill interval");
-        while (!interval->usesEmpty())
-            spillInterval->addUse(interval->popUse());
-        reg->removeInterval(interval);
-        return;
+    JitSpew(JitSpew_RegAlloc, "  Spilling bundle");
+    MOZ_ASSERT(bundle->allocation().isBogus());
+
+    if (LiveBundle* spillParent = bundle->spillParent()) {
+        JitSpew(JitSpew_RegAlloc, "    Using existing spill bundle");
+        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            LiveRange* parentRange = spillParent->rangeFor(range->from());
+            MOZ_ASSERT(parentRange->contains(range));
+            MOZ_ASSERT(range->vreg() == parentRange->vreg());
+            range->distributeUses(parentRange);
+            MOZ_ASSERT(!range->hasUses());
+            vregs[range->vreg()].removeRange(range);
+        }
+        return true;
     }
 
-    bool useCanonical = !reg->hasCanonicalSpillExclude()
-        || interval->start() < reg->canonicalSpillExclude();
-
-    if (useCanonical) {
-        if (reg->canonicalSpill()) {
-            JitSpew(JitSpew_RegAlloc, "    Picked canonical spill location %s",
-                    reg->canonicalSpill()->toString());
-            interval->setAllocation(*reg->canonicalSpill());
-            return;
-        }
-
-        if (reg->group() && !reg->group()->spill.isUse()) {
-            JitSpew(JitSpew_RegAlloc, "    Reusing group spill location %s",
-                    reg->group()->spill.toString());
-            interval->setAllocation(reg->group()->spill);
-            reg->setCanonicalSpill(reg->group()->spill);
-            return;
-        }
-    }
-
-    uint32_t virtualSlot = numVirtualStackSlots++;
-
-    // Count virtual stack slots down from the maximum representable value, so
-    // that virtual slots are more obviously distinguished from real slots.
-    LStackSlot alloc(LAllocation::DATA_MASK - virtualSlot);
-    interval->setAllocation(alloc);
-
-    JitSpew(JitSpew_RegAlloc, "    Allocating spill location %s", alloc.toString());
-
-    if (useCanonical) {
-        reg->setCanonicalSpill(alloc);
-        if (reg->group())
-            reg->group()->spill = alloc;
-    }
+    return bundle->spillSet()->addSpilledBundle(bundle);
 }
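
When a bundle is spilled into an existing spill parent, every use on its ranges is handed over to the parent range that covers the same positions. A simplified sketch of that redistribution; the containers and method names are hypothetical, not Ion's intrusive lists:

#include <cstdint>
#include <vector>

struct Use { uint32_t pos; };

struct Range {
    uint32_t from, to;         // half-open [from, to)
    std::vector<Use> uses;

    bool contains(const Range& other) const {
        return from <= other.from && other.to <= to;
    }

    // Move every use from |child| onto this range; |child| ends up empty,
    // mirroring the MOZ_ASSERT(!range->hasUses()) after distributeUses above.
    void takeUsesFrom(Range& child) {
        for (const Use& use : child.uses)
            uses.push_back(use);
        child.uses.clear();
    }
};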
 
 bool
 BacktrackingAllocator::pickStackSlots()
 {
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        BacktrackingVirtualRegister* reg = &vregs[i];
+        VirtualRegister& reg = vregs[i];
 
         if (mir->shouldCancel("Backtracking Pick Stack Slots"))
             return false;
 
-        for (size_t j = 0; j < reg->numIntervals(); j++) {
-            LiveInterval* interval = reg->getInterval(j);
-            if (!pickStackSlot(interval))
-                return false;
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            LiveBundle* bundle = range->bundle();
+
+            if (bundle->allocation().isBogus()) {
+                if (!pickStackSlot(bundle->spillSet()))
+                    return false;
+                MOZ_ASSERT(!bundle->allocation().isBogus());
+            }
         }
     }
 
     return true;
 }
 
 bool
-BacktrackingAllocator::pickStackSlot(LiveInterval* interval)
+BacktrackingAllocator::pickStackSlot(SpillSet* spillSet)
 {
-    LAllocation alloc = *interval->getAllocation();
-    MOZ_ASSERT(!alloc.isUse());
-
-    if (!isVirtualStackSlot(alloc))
-        return true;
-
-    BacktrackingVirtualRegister& reg = vregs[interval->vreg()];
-
-    // Get a list of all the intervals which will share this stack slot.
-    LiveIntervalVector commonIntervals;
-
-    if (!commonIntervals.append(interval))
-        return false;
-
-    if (reg.canonicalSpill() && alloc == *reg.canonicalSpill()) {
-        // Look for other intervals in the vreg using this spill slot.
-        for (size_t i = 0; i < reg.numIntervals(); i++) {
-            LiveInterval* ninterval = reg.getInterval(i);
-            if (ninterval != interval && *ninterval->getAllocation() == alloc) {
-                if (!commonIntervals.append(ninterval))
-                    return false;
-            }
-        }
-
-        // Look for intervals in other registers with the same group using this
-        // spill slot.
-        if (reg.group() && alloc == reg.group()->spill) {
-            for (size_t i = 0; i < reg.group()->registers.length(); i++) {
-                uint32_t nvreg = reg.group()->registers[i];
-                if (nvreg == interval->vreg())
-                    continue;
-                BacktrackingVirtualRegister& nreg = vregs[nvreg];
-                for (size_t j = 0; j < nreg.numIntervals(); j++) {
-                    LiveInterval* ninterval = nreg.getInterval(j);
-                    if (*ninterval->getAllocation() == alloc) {
-                        if (!commonIntervals.append(ninterval))
-                            return false;
-                    }
+    // Look through all ranges that have been spilled in this set for a
+    // register definition which is fixed to a stack or argument slot. If we
+    // find one, use it for all bundles that have been spilled. tryMergeBundles
+    // makes sure this reuse is possible when an initial bundle contains ranges
+    // from multiple virtual registers.
+    for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+        LiveBundle* bundle = spillSet->spilledBundle(i);
+        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            if (range->hasDefinition()) {
+                LDefinition* def = vregs[range->vreg()].def();
+                if (def->policy() == LDefinition::FIXED) {
+                    MOZ_ASSERT(!def->output()->isRegister());
+                    MOZ_ASSERT(!def->output()->isStackSlot());
+                    spillSet->setAllocation(*def->output());
+                    return true;
                 }
             }
         }
-    } else {
-        MOZ_ASSERT_IF(reg.group(), alloc != reg.group()->spill);
     }
 
-    if (!reuseOrAllocateStackSlot(commonIntervals, reg.type(), &alloc))
-        return false;
-
-    MOZ_ASSERT(!isVirtualStackSlot(alloc));
-
-    // Set the physical stack slot for each of the intervals found earlier.
-    for (size_t i = 0; i < commonIntervals.length(); i++)
-        commonIntervals[i]->setAllocation(alloc);
-
-    return true;
-}
-
-bool
-BacktrackingAllocator::reuseOrAllocateStackSlot(const LiveIntervalVector& intervals, LDefinition::Type type,
-                                                LAllocation* palloc)
-{
+    LDefinition::Type type = vregs[spillSet->spilledBundle(0)->firstRange()->vreg()].type();
+
     SpillSlotList* slotList;
     switch (StackSlotAllocator::width(type)) {
       case 4:  slotList = &normalSlots; break;
       case 8:  slotList = &doubleSlots; break;
       case 16: slotList = &quadSlots;   break;
       default:
         MOZ_CRASH("Bad width");
     }
 
     // Maximum number of existing spill slots we will look at before giving up
     // and allocating a new slot.
     static const size_t MAX_SEARCH_COUNT = 10;
 
-    if (!slotList->empty()) {
-        size_t searches = 0;
-        SpillSlot* stop = nullptr;
-        while (true) {
-            SpillSlot* spill = *slotList->begin();
-            if (!stop) {
-                stop = spill;
-            } else if (stop == spill) {
-                // We looked through every slot in the list.
-                break;
-            }
-
-            bool success = true;
-            for (size_t i = 0; i < intervals.length() && success; i++) {
-                LiveInterval* interval = intervals[i];
-                for (size_t j = 0; j < interval->numRanges(); j++) {
-                    AllocatedRange range(interval, interval->getRange(j)), existing;
-                    if (spill->allocated.contains(range, &existing)) {
-                        success = false;
-                        break;
-                    }
+    size_t searches = 0;
+    SpillSlot* stop = nullptr;
+    while (!slotList->empty()) {
+        SpillSlot* spillSlot = *slotList->begin();
+        if (!stop) {
+            stop = spillSlot;
+        } else if (stop == spillSlot) {
+            // We looked through every slot in the list.
+            break;
+        }
+
+        bool success = true;
+        for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+            LiveBundle* bundle = spillSet->spilledBundle(i);
+            for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+                LiveRange* range = LiveRange::get(*iter);
+                LiveRange* existing;
+                if (spillSlot->allocated.contains(range, &existing)) {
+                    success = false;
+                    break;
                 }
             }
-            if (success) {
-                // We can reuse this physical stack slot for the new intervals.
-                // Update the allocated ranges for the slot.
-                if (!insertAllRanges(spill->allocated, intervals))
-                    return false;
-                *palloc = spill->alloc;
-                return true;
-            }
-
-            // On a miss, move the spill to the end of the list. This will cause us
-            // to make fewer attempts to allocate from slots with a large and
-            // highly contended range.
-            slotList->popFront();
-            slotList->pushBack(spill);
-
-            if (++searches == MAX_SEARCH_COUNT)
+            if (!success)
                 break;
         }
+        if (success) {
+            // We can reuse this physical stack slot for the new bundles.
+            // Update the allocated ranges for the slot.
+            for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+                LiveBundle* bundle = spillSet->spilledBundle(i);
+                if (!insertAllRanges(spillSlot->allocated, bundle))
+                    return false;
+            }
+            spillSet->setAllocation(spillSlot->alloc);
+            return true;
+        }
+
+        // On a miss, move the spill to the end of the list. This will cause us
+        // to make fewer attempts to allocate from slots with a large and
+        // highly contended range.
+        slotList->popFront();
+        slotList->pushBack(spillSlot);
+
+        if (++searches == MAX_SEARCH_COUNT)
+            break;
     }
 
     // We need a new physical stack slot.
     uint32_t stackSlot = stackSlotAllocator.allocateSlot(type);
 
-    // Make sure the virtual and physical stack slots don't start overlapping.
-    if (isVirtualStackSlot(LStackSlot(stackSlot)))
-        return false;
-
-    SpillSlot* spill = new(alloc()) SpillSlot(stackSlot, alloc().lifoAlloc());
-    if (!spill)
+    SpillSlot* spillSlot = new(alloc()) SpillSlot(stackSlot, alloc().lifoAlloc());
+    if (!spillSlot)
         return false;
 
-    if (!insertAllRanges(spill->allocated, intervals))
-        return false;
-
-    *palloc = spill->alloc;
-
-    slotList->pushFront(spill);
+    for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
+        LiveBundle* bundle = spillSet->spilledBundle(i);
+        if (!insertAllRanges(spillSlot->allocated, bundle))
+            return false;
+    }
+
+    spillSet->setAllocation(spillSlot->alloc);
+
+    slotList->pushFront(spillSlot);
+    return true;
+}
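
pickStackSlot probes at most MAX_SEARCH_COUNT existing spill slots and rotates a conflicting slot to the back of the list, so slots with large, heavily contended ranges are retried less often. A standalone sketch of that policy; std::deque and the conflicts predicate stand in for the allocator's SpillSlotList bookkeeping:

#include <cstddef>
#include <deque>
#include <functional>

struct Slot { int id; };

// Return the first reusable slot, rotating conflicting slots to the back,
// or nullptr after maxSearch probes so the caller allocates a fresh slot.
Slot* pickSlot(std::deque<Slot*>& slots,
               const std::function<bool(const Slot&)>& conflicts,
               size_t maxSearch = 10) {
    for (size_t probes = 0; probes < maxSearch && !slots.empty(); probes++) {
        Slot* slot = slots.front();
        if (!conflicts(*slot))
            return slot;
        slots.pop_front();    // miss: move this slot to the end of the list
        slots.push_back(slot);
    }
    return nullptr;
}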
+
+bool
+BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, LiveBundle* bundle)
+{
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (!set.insert(range))
+            return false;
+    }
     return true;
 }
 
 bool
-BacktrackingAllocator::insertAllRanges(AllocatedRangeSet& set, const LiveIntervalVector& intervals)
-{
-    for (size_t i = 0; i < intervals.length(); i++) {
-        LiveInterval* interval = intervals[i];
-        for (size_t j = 0; j < interval->numRanges(); j++) {
-            AllocatedRange range(interval, interval->getRange(j));
-            if (!set.insert(range))
-                return false;
-        }
-    }
-    return true;
-}
-
-// Add moves to resolve conflicting assignments between a block and its
-// predecessors.
-bool
 BacktrackingAllocator::resolveControlFlow()
 {
+    // Add moves to handle changing assignments for vregs over their lifetime.
     JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)");
 
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
+    // Look for places where a register's assignment changes in the middle of a
+    // basic block.
+    MOZ_ASSERT(!vregs[0u].hasRanges());
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        BacktrackingVirtualRegister* reg = &vregs[i];
+        VirtualRegister& reg = vregs[i];
 
         if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg loop)"))
             return false;
 
-        for (size_t j = 1; j < reg->numIntervals(); j++) {
-            LiveInterval* interval = reg->getInterval(j);
-            MOZ_ASSERT(interval->index() == j);
-
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            // The range which defines the register does not have a predecessor
+            // to add moves from.
+            if (range->hasDefinition())
+                continue;
+
+            // Ignore ranges that start at block boundaries. We will handle
+            // these in the next phase.
+            CodePosition start = range->from();
+            LNode* ins = insData[start];
+            if (start == entryOf(ins->block()))
+                continue;
+
+            // If we already saw a range which covers the start of this range
+            // and has the same allocation, we don't need an explicit move at
+            // the start of this range.
             bool skip = false;
-            for (int k = j - 1; k >= 0; k--) {
-                LiveInterval* prevInterval = reg->getInterval(k);
-                if (prevInterval->start() != interval->start())
-                    break;
-                if (*prevInterval->getAllocation() == *interval->getAllocation()) {
+            for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin();
+                 prevIter != iter;
+                 prevIter++)
+            {
+                LiveRange* prevRange = LiveRange::get(*prevIter);
+                if (prevRange->covers(start) &&
+                    prevRange->bundle()->allocation() == range->bundle()->allocation())
+                {
                     skip = true;
                     break;
                 }
             }
             if (skip)
                 continue;
 
-            CodePosition start = interval->start();
-            LNode* ins = insData[start];
-            if (start > entryOf(ins->block())) {
-                MOZ_ASSERT(start == inputOf(ins) || start == outputOf(ins));
-
-                LiveInterval* prevInterval = reg->intervalFor(start.previous());
-                if (start.subpos() == CodePosition::INPUT) {
-                    if (!moveInput(ins->toInstruction(), prevInterval, interval, reg->type()))
-                        return false;
-                } else {
-                    if (!moveAfter(ins->toInstruction(), prevInterval, interval, reg->type()))
-                        return false;
-                }
+            LiveRange* predecessorRange = reg.rangeFor(start.previous());
+            if (start.subpos() == CodePosition::INPUT) {
+                if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type()))
+                    return false;
+            } else {
+                if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type()))
+                    return false;
             }
         }
     }
 
     JitSpew(JitSpew_RegAlloc, "Resolving control flow (block loop)");
 
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
             return false;
 
         LBlock* successor = graph.getBlock(i);
         MBasicBlock* mSuccessor = successor->mir();
         if (mSuccessor->numPredecessors() < 1)
             continue;
 
-        // Resolve phis to moves
+        // Resolve phis to moves.
         for (size_t j = 0; j < successor->numPhis(); j++) {
             LPhi* phi = successor->getPhi(j);
             MOZ_ASSERT(phi->numDefs() == 1);
             LDefinition* def = phi->getDef(0);
-            VirtualRegister* vreg = &vregs[def];
-            LiveInterval* to = vreg->intervalFor(entryOf(successor));
+            VirtualRegister& reg = vreg(def);
+            LiveRange* to = reg.rangeFor(entryOf(successor));
             MOZ_ASSERT(to);
 
             for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                 LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
                 MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
 
                 LAllocation* input = phi->getOperand(k);
-                LiveInterval* from = vregs[input].intervalFor(exitOf(predecessor));
+                LiveRange* from = vreg(input).rangeFor(exitOf(predecessor));
                 MOZ_ASSERT(from);
 
                 if (!moveAtExit(predecessor, from, to, def->type()))
                     return false;
             }
         }
 
-        // Resolve split intervals with moves
+        // Add moves to resolve graph edges with different allocations at their
+        // source and target.
         BitSet& live = liveIn[mSuccessor->id()];
 
         for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
             VirtualRegister& reg = vregs[*liveRegId];
 
             for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
                 LBlock* predecessor = mSuccessor->getPredecessor(j)->lir();
 
-                for (size_t k = 0; k < reg.numIntervals(); k++) {
-                    LiveInterval* to = reg.getInterval(k);
+                for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+                    LiveRange* to = LiveRange::get(*iter);
                     if (!to->covers(entryOf(successor)))
                         continue;
                     if (to->covers(exitOf(predecessor)))
                         continue;
 
-                    LiveInterval* from = reg.intervalFor(exitOf(predecessor));
+                    LiveRange* from = reg.rangeFor(exitOf(predecessor));
 
                     if (mSuccessor->numPredecessors() > 1) {
                         MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
                         if (!moveAtExit(predecessor, from, to, reg.type()))
                             return false;
                     } else {
                         if (!moveAtEntry(successor, from, to, reg.type()))
                             return false;
@@ -1351,167 +1802,251 @@ BacktrackingAllocator::isRegisterUse(LUs
         return true;
 
       default:
         return false;
     }
 }
 
 bool
-BacktrackingAllocator::isRegisterDefinition(LiveInterval* interval)
+BacktrackingAllocator::isRegisterDefinition(LiveRange* range)
 {
-    if (interval->index() != 0)
+    if (!range->hasDefinition())
         return false;
 
-    VirtualRegister& reg = vregs[interval->vreg()];
+    VirtualRegister& reg = vregs[range->vreg()];
     if (reg.ins()->isPhi())
         return false;
 
     if (reg.def()->policy() == LDefinition::FIXED && !reg.def()->output()->isRegister())
         return false;
 
     return true;
 }
 
 bool
 BacktrackingAllocator::reifyAllocations()
 {
     JitSpew(JitSpew_RegAlloc, "Reifying Allocations");
 
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(vregs[0u].numIntervals() == 0);
+    MOZ_ASSERT(!vregs[0u].hasRanges());
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        VirtualRegister* reg = &vregs[i];
+        VirtualRegister& reg = vregs[i];
 
         if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
             return false;
 
-        for (size_t j = 0; j < reg->numIntervals(); j++) {
-            LiveInterval* interval = reg->getInterval(j);
-            MOZ_ASSERT(interval->index() == j);
-
-            if (interval->index() == 0) {
-                reg->def()->setOutput(*interval->getAllocation());
-                if (reg->ins()->recoversInput()) {
-                    LSnapshot* snapshot = reg->ins()->toInstruction()->snapshot();
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            if (range->hasDefinition()) {
+                reg.def()->setOutput(range->bundle()->allocation());
+                if (reg.ins()->recoversInput()) {
+                    LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
                     for (size_t i = 0; i < snapshot->numEntries(); i++) {
                         LAllocation* entry = snapshot->getEntry(i);
                         if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
-                            *entry = *reg->def()->output();
+                            *entry = *reg.def()->output();
                     }
                 }
             }
 
-            for (UsePositionIterator iter(interval->usesBegin());
-                 iter != interval->usesEnd();
-                 iter++)
-            {
+            for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
                 LAllocation* alloc = iter->use;
-                *alloc = *interval->getAllocation();
+                *alloc = range->bundle()->allocation();
 
                 // For any uses which feed into MUST_REUSE_INPUT definitions,
                 // add copies if the use and def have different allocations.
                 LNode* ins = insData[iter->pos];
                 if (LDefinition* def = FindReusingDefinition(ins, alloc)) {
-                    LiveInterval* outputInterval =
-                        vregs[def->virtualRegister()].intervalFor(outputOf(ins));
-                    LAllocation* res = outputInterval->getAllocation();
-                    LAllocation* sourceAlloc = interval->getAllocation();
-
-                    if (*res != *alloc) {
+                    LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
+                    LAllocation res = outputRange->bundle()->allocation();
+                    LAllocation sourceAlloc = range->bundle()->allocation();
+
+                    if (res != *alloc) {
                         LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
-                        if (!group->addAfter(sourceAlloc, res, reg->type()))
+                        if (!group->addAfter(sourceAlloc, res, reg.type()))
                             return false;
-                        *alloc = *res;
+                        *alloc = res;
                     }
                 }
             }
 
-            addLiveRegistersForInterval(reg, interval);
+            addLiveRegistersForRange(reg, range);
         }
     }
 
     graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
     return true;
 }
 
+size_t
+BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from)
+{
+    size_t i = 0;
+    for (; i < graph.numNonCallSafepoints(); i++) {
+        const LInstruction* ins = graph.getNonCallSafepoint(i);
+        if (from <= inputOf(ins))
+            break;
+    }
+    return i;
+}
+
+void
+BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range)
+{
+    // Fill in the live register sets for all non-call safepoints.
+    LAllocation a = range->bundle()->allocation();
+    if (!a.isRegister())
+        return;
+
+    // Don't add output registers to the safepoint.
+    CodePosition start = range->from();
+    if (range->hasDefinition() && !reg.isTemp()) {
+#ifdef CHECK_OSIPOINT_REGISTERS
+        // We don't add the output register to the safepoint,
+        // but it still might get added as one of the inputs.
+        // So eagerly add this reg to the safepoint clobbered registers.
+        if (reg.ins()->isInstruction()) {
+            if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint())
+                safepoint->addClobberedRegister(a.toRegister());
+        }
+#endif
+        start = start.next();
+    }
+
+    size_t i = findFirstNonCallSafepoint(start);
+    for (; i < graph.numNonCallSafepoints(); i++) {
+        LInstruction* ins = graph.getNonCallSafepoint(i);
+        CodePosition pos = inputOf(ins);
+
+        // Safepoints are sorted, so we can shortcut out of this loop
+        // if we go out of range.
+        if (range->to() <= pos)
+            break;
+
+        MOZ_ASSERT(range->covers(pos));
+
+        LSafepoint* safepoint = ins->safepoint();
+        safepoint->addLiveRegister(a.toRegister());
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+        if (reg.isTemp())
+            safepoint->addClobberedRegister(a.toRegister());
+#endif
+    }
+}
+
+static inline bool
+IsNunbox(VirtualRegister& reg)
+{
+#ifdef JS_NUNBOX32
+    return reg.type() == LDefinition::TYPE ||
+           reg.type() == LDefinition::PAYLOAD;
+#else
+    return false;
+#endif
+}
+
+static inline bool
+IsSlotsOrElements(VirtualRegister& reg)
+{
+    return reg.type() == LDefinition::SLOTS;
+}
+
+static inline bool
+IsTraceable(VirtualRegister& reg)
+{
+    if (reg.type() == LDefinition::OBJECT)
+        return true;
+#ifdef JS_PUNBOX64
+    if (reg.type() == LDefinition::BOX)
+        return true;
+#endif
+    return false;
+}
+
+size_t
+BacktrackingAllocator::findFirstSafepoint(CodePosition pos, size_t startFrom)
+{
+    size_t i = startFrom;
+    for (; i < graph.numSafepoints(); i++) {
+        LInstruction* ins = graph.getSafepoint(i);
+        if (pos <= inputOf(ins))
+            break;
+    }
+    return i;
+}
+
 bool
 BacktrackingAllocator::populateSafepoints()
 {
     JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
 
     size_t firstSafepoint = 0;
 
-    // Virtual register number 0 is unused.
     MOZ_ASSERT(!vregs[0u].def());
-    for (uint32_t i = 1; i < vregs.numVirtualRegisters(); i++) {
-        BacktrackingVirtualRegister* reg = &vregs[i];
-
-        if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
+    for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister& reg = vregs[i];
+
+        if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
             continue;
 
-        firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
+        firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
         if (firstSafepoint >= graph.numSafepoints())
             break;
 
-        // Find the furthest endpoint. Intervals are sorted, but by start
-        // position, and we want the greatest end position.
-        CodePosition end = reg->getInterval(0)->end();
-        for (size_t j = 1; j < reg->numIntervals(); j++)
-            end = Max(end, reg->getInterval(j)->end());
-
-        for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
-            LInstruction* ins = graph.getSafepoint(j);
-
-            // Stop processing safepoints if we know we're out of this virtual
-            // register's range.
-            if (end < outputOf(ins))
-                break;
-
-            // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
-            // is not used with gcthings or nunboxes, or we would have to add the input reg
-            // to this safepoint.
-            if (ins == reg->ins() && !reg->isTemp()) {
-                DebugOnly<LDefinition*> def = reg->def();
-                MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
-                              def->type() == LDefinition::GENERAL ||
-                              def->type() == LDefinition::INT32 ||
-                              def->type() == LDefinition::FLOAT32 ||
-                              def->type() == LDefinition::DOUBLE);
-                continue;
-            }
-
-            LSafepoint* safepoint = ins->safepoint();
-
-            for (size_t k = 0; k < reg->numIntervals(); k++) {
-                LiveInterval* interval = reg->getInterval(k);
-                if (!interval->covers(inputOf(ins)))
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
+                LInstruction* ins = graph.getSafepoint(j);
+
+                if (!range->covers(inputOf(ins))) {
+                    if (inputOf(ins) >= range->to())
+                        break;
                     continue;
-
-                LAllocation* a = interval->getAllocation();
-                if (a->isGeneralReg() && ins->isCall())
+                }
+
+                // Include temps but not instruction outputs. Also make sure
+                // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
+                // we would have to add the input reg to this safepoint.
+                if (ins == reg.ins() && !reg.isTemp()) {
+                    DebugOnly<LDefinition*> def = reg.def();
+                    MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
+                                  def->type() == LDefinition::GENERAL ||
+                                  def->type() == LDefinition::INT32 ||
+                                  def->type() == LDefinition::FLOAT32 ||
+                                  def->type() == LDefinition::DOUBLE);
                     continue;
-
-                switch (reg->type()) {
+                }
+
+                LSafepoint* safepoint = ins->safepoint();
+
+                LAllocation a = range->bundle()->allocation();
+                if (a.isGeneralReg() && ins->isCall())
+                    continue;
+
+                switch (reg.type()) {
                   case LDefinition::OBJECT:
-                    safepoint->addGcPointer(*a);
+                    safepoint->addGcPointer(a);
                     break;
                   case LDefinition::SLOTS:
-                    safepoint->addSlotsOrElementsPointer(*a);
+                    safepoint->addSlotsOrElementsPointer(a);
                     break;
 #ifdef JS_NUNBOX32
                   case LDefinition::TYPE:
-                    safepoint->addNunboxType(i, *a);
+                    safepoint->addNunboxType(i, a);
                     break;
                   case LDefinition::PAYLOAD:
-                    safepoint->addNunboxPayload(i, *a);
+                    safepoint->addNunboxPayload(i, a);
                     break;
 #else
                   case LDefinition::BOX:
-                    safepoint->addBoxedValue(*a);
+                    safepoint->addBoxedValue(a);
                     break;
 #endif
                   default:
                     MOZ_CRASH("Bad register type");
                 }
             }
         }
     }
@@ -1522,37 +2057,41 @@ BacktrackingAllocator::populateSafepoint
 bool
 BacktrackingAllocator::annotateMoveGroups()
 {
     // Annotate move groups in the LIR graph with any register that is not
     // allocated at that point and can be used as a scratch register. This is
     // only required for x86, as other platforms always have scratch registers
     // available for use.
 #ifdef JS_CODEGEN_X86
+    LiveRange* range = LiveRange::New(alloc(), 0, CodePosition(), CodePosition().next());
+    if (!range)
+        return false;
+
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         if (mir->shouldCancel("Backtracking Annotate Move Groups"))
             return false;
 
         LBlock* block = graph.getBlock(i);
         LInstruction* last = nullptr;
         for (LInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
             if (iter->isMoveGroup()) {
                 CodePosition from = last ? outputOf(last) : entryOf(block);
-                LiveInterval::Range range(from, from.next());
-                AllocatedRange search(nullptr, &range), existing;
+                range->setTo(from.next());
+                range->setFrom(from);
 
                 for (size_t i = 0; i < AnyRegister::Total; i++) {
                     PhysicalRegister& reg = registers[i];
                     if (reg.reg.isFloat() || !reg.allocatable)
                         continue;
 
                     // This register is unavailable for use if (a) it is in use
-                    // by some live interval immediately before the move group,
+                    // by some live range immediately before the move group,
                     // or (b) it is an operand in one of the group's moves. The
-                    // latter case handles live intervals which end immediately
+                    // latter case handles live ranges which end immediately
                     // before the move group or start immediately after.
                     // For (b) we need to consider move groups immediately
                     // preceding or following this one.
 
                     if (iter->toMoveGroup()->uses(reg.reg.gpr()))
                         continue;
                     bool found = false;
                     LInstructionIterator niter(iter);
@@ -1576,766 +2115,824 @@ BacktrackingAllocator::annotateMoveGroup
                                     break;
                                 }
                             } else {
                                 break;
                             }
                         } while (riter != block->begin());
                     }
 
-                    if (found || reg.allocations.contains(search, &existing))
+                    LiveRange* existing;
+                    if (found || reg.allocations.contains(range, &existing))
                         continue;
 
                     iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
                     break;
                 }
             } else {
                 last = *iter;
             }
         }
     }
 #endif
 
     return true;
 }
 
+/////////////////////////////////////////////////////////////////////
+// Debugging methods
+/////////////////////////////////////////////////////////////////////
+
+#ifdef DEBUG
+
+const char*
+LiveRange::toString() const
+{
+    // Not reentrant!
+    static char buf[2000];
+
+    char* cursor = buf;
+    char* end = cursor + sizeof(buf);
+
+    int n = JS_snprintf(cursor, end - cursor, "v%u [%u,%u)",
+                        hasVreg() ? vreg() : 0, from().bits(), to().bits());
+    if (n < 0) MOZ_CRASH();
+    cursor += n;
+
+    if (bundle() && !bundle()->allocation().isBogus()) {
+        n = JS_snprintf(cursor, end - cursor, " %s", bundle()->allocation().toString());
+        if (n < 0) MOZ_CRASH();
+        cursor += n;
+    }
+
+    if (hasDefinition()) {
+        n = JS_snprintf(cursor, end - cursor, " (def)");
+        if (n < 0) MOZ_CRASH();
+        cursor += n;
+    }
+
+    for (UsePositionIterator iter = usesBegin(); iter; iter++) {
+        n = JS_snprintf(cursor, end - cursor, " %s@%u", iter->use->toString(), iter->pos.bits());
+        if (n < 0) MOZ_CRASH();
+        cursor += n;
+    }
+
+    return buf;
+}
+
+const char*
+LiveBundle::toString() const
+{
+    // Not reentrant!
+    static char buf[2000];
+
+    char* cursor = buf;
+    char* end = cursor + sizeof(buf);
+
+    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
+        int n = JS_snprintf(cursor, end - cursor, "%s %s",
+                            (iter == rangesBegin()) ? "" : " ##",
+                            LiveRange::get(*iter)->toString());
+        if (n < 0) MOZ_CRASH();
+        cursor += n;
+    }
+
+    return buf;
+}
+
+#endif // DEBUG
+
 void
-BacktrackingAllocator::dumpRegisterGroups()
+BacktrackingAllocator::dumpVregs()
 {
 #ifdef DEBUG
-    bool any = false;
-
-    // Virtual register number 0 is unused.
-    MOZ_ASSERT(!vregs[0u].group());
-    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
-        VirtualRegisterGroup* group = vregs[i].group();
-        if (group && i == group->canonicalReg()) {
-            if (!any) {
-                fprintf(stderr, "Register groups:\n");
-                any = true;
+    MOZ_ASSERT(!vregs[0u].hasRanges());
+
+    fprintf(stderr, "Live ranges by virtual register:\n");
+
+    for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        fprintf(stderr, "  ");
+        VirtualRegister& reg = vregs[i];
+        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
+            if (iter != reg.rangesBegin())
+                fprintf(stderr, " ## ");
+            fprintf(stderr, "%s", LiveRange::get(*iter)->toString());
+        }
+        fprintf(stderr, "\n");
+    }
+
+    fprintf(stderr, "\nLive ranges by bundle:\n");
+
+    for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister& reg = vregs[i];
+        for (LiveRange::RegisterLinkIterator baseIter = reg.rangesBegin(); baseIter; baseIter++) {
+            LiveRange* range = LiveRange::get(*baseIter);
+            LiveBundle* bundle = range->bundle();
+            if (range == bundle->firstRange()) {
+                fprintf(stderr, "  ");
+                for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+                    if (iter != bundle->rangesBegin())
+                        fprintf(stderr, " ## ");
+                    fprintf(stderr, "%s", LiveRange::get(*iter)->toString());
+                }
+                fprintf(stderr, "\n");
             }
-            fprintf(stderr, " ");
-            for (size_t j = 0; j < group->registers.length(); j++)
-                fprintf(stderr, " v%u", group->registers[j]);
-            fprintf(stderr, "\n");
         }
     }
-    if (any)
-        fprintf(stderr, "\n");
 #endif
 }
 
 void
 BacktrackingAllocator::dumpFixedRanges()
 {
 #ifdef DEBUG
-    bool any = false;
-
-    for (size_t i = 0; i < AnyRegister::Total; i++) {
-        if (registers[i].allocatable && fixedIntervals[i]->numRanges() != 0) {
-            if (!any) {
-                fprintf(stderr, "Live ranges by physical register:\n");
-                any = true;
-            }
-            fprintf(stderr, "  %s: %s\n", AnyRegister::FromCode(i).name(), fixedIntervals[i]->toString());
-        }
-    }
-
-    if (any)
-        fprintf(stderr, "\n");
+    fprintf(stderr, "Live ranges by physical register: %s\n", callRanges->toString());
 #endif // DEBUG
 }
 
 #ifdef DEBUG
-struct BacktrackingAllocator::PrintLiveIntervalRange
+struct BacktrackingAllocator::PrintLiveRange
 {
     bool& first_;
 
-    explicit PrintLiveIntervalRange(bool& first) : first_(first) {}
-
-    void operator()(const AllocatedRange& item)
+    explicit PrintLiveRange(bool& first) : first_(first) {}
+
+    void operator()(const LiveRange* range)
     {
-        if (item.range == item.interval->getRange(0)) {
-            if (first_)
-                first_ = false;
-            else
-                fprintf(stderr, " /");
-            if (item.interval->hasVreg())
-                fprintf(stderr, " v%u[%u]", item.interval->vreg(), item.interval->index());
-            fprintf(stderr, "%s", item.interval->rangesToString());
-        }
+        if (first_)
+            first_ = false;
+        else
+            fprintf(stderr, " /");
+        fprintf(stderr, " %s", range->toString());
     }
 };
 #endif
 
 void
 BacktrackingAllocator::dumpAllocations()
 {
 #ifdef DEBUG
-    fprintf(stderr, "Allocations by virtual register:\n");
+    fprintf(stderr, "Allocations:\n");
 
     dumpVregs();
 
     fprintf(stderr, "Allocations by physical register:\n");
 
     for (size_t i = 0; i < AnyRegister::Total; i++) {
         if (registers[i].allocatable && !registers[i].allocations.empty()) {
             fprintf(stderr, "  %s:", AnyRegister::FromCode(i).name());
             bool first = true;
-            registers[i].allocations.forEach(PrintLiveIntervalRange(first));
+            registers[i].allocations.forEach(PrintLiveRange(first));
             fprintf(stderr, "\n");
         }
     }
 
     fprintf(stderr, "\n");
 #endif // DEBUG
 }
 
-bool
-BacktrackingAllocator::addLiveInterval(LiveIntervalVector& intervals, uint32_t vreg,
-                                       LiveInterval* spillInterval,
-                                       CodePosition from, CodePosition to)
-{
-    LiveInterval* interval = LiveInterval::New(alloc(), vreg, 0);
-    interval->setSpillInterval(spillInterval);
-    return interval->addRange(from, to) && intervals.append(interval);
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 // Heuristic Methods
 ///////////////////////////////////////////////////////////////////////////////
 
 size_t
-BacktrackingAllocator::computePriority(const LiveInterval* interval)
+BacktrackingAllocator::computePriority(LiveBundle* bundle)
 {
-    // The priority of an interval is its total length, so that longer lived
-    // intervals will be processed before shorter ones (even if the longer ones
-    // have a low spill weight). See processInterval().
+    // The priority of a bundle is its total length, so that longer lived
+    // bundles will be processed before shorter ones (even if the longer ones
+    // have a low spill weight). See processBundle().
     size_t lifetimeTotal = 0;
 
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        const LiveInterval::Range* range = interval->getRange(i);
-        lifetimeTotal += range->to - range->from;
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        lifetimeTotal += range->to() - range->from();
     }
 
     return lifetimeTotal;
 }
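Worked example (hypothetical positions): a bundle holding ranges [4,10) and [20,24) gets priority
(10 - 4) + (24 - 20) = 10, so it comes out of the allocation queue ahead of a one-position bundle of
priority 1, even if its spill weight is lower.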
 
-size_t
-BacktrackingAllocator::computePriority(const VirtualRegisterGroup* group)
+bool
+BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins)
 {
-    size_t priority = 0;
-    for (size_t j = 0; j < group->registers.length(); j++) {
-        uint32_t vreg = group->registers[j];
-        priority += computePriority(vregs[vreg].getInterval(0));
-    }
-    return priority;
+    // Whether this is a minimal range capturing a definition at ins.
+    return (range->to() <= minimalDefEnd(ins).next()) &&
+           ((!ins->isPhi() && range->from() == inputOf(ins)) || range->from() == outputOf(ins));
 }
 
 bool
-BacktrackingAllocator::minimalDef(const LiveInterval* interval, LNode* ins)
+BacktrackingAllocator::minimalUse(LiveRange* range, LNode* ins)
 {
-    // Whether interval is a minimal interval capturing a definition at ins.
-    return (interval->end() <= minimalDefEnd(ins).next()) &&
-        ((!ins->isPhi() && interval->start() == inputOf(ins)) || interval->start() == outputOf(ins));
+    // Whether this is a minimal range capturing a use at ins.
+    return (range->from() == inputOf(ins)) &&
+           (range->to() == outputOf(ins) || range->to() == outputOf(ins).next());
 }
 
 bool
-BacktrackingAllocator::minimalUse(const LiveInterval* interval, LNode* ins)
+BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed)
 {
-    // Whether interval is a minimal interval capturing a use at ins.
-    return (interval->start() == inputOf(ins)) &&
-        (interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next());
-}
-
-bool
-BacktrackingAllocator::minimalInterval(const LiveInterval* interval, bool* pfixed)
-{
-    if (!interval->hasVreg()) {
+    LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
+    LiveRange* range = LiveRange::get(*iter);
+
+    if (!range->hasVreg()) {
         *pfixed = true;
         return true;
     }
 
-    if (interval->index() == 0) {
-        VirtualRegister& reg = vregs[interval->vreg()];
+    // If a bundle contains multiple ranges, splitting at all register uses
+    // (splitAt with an empty list of split positions) will split each range
+    // into a separate bundle.
+    if (++iter)
+        return false;
+
+    if (range->hasDefinition()) {
+        VirtualRegister& reg = vregs[range->vreg()];
         if (pfixed)
             *pfixed = reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister();
-        return minimalDef(interval, reg.ins());
+        return minimalDef(range, reg.ins());
     }
 
     bool fixed = false, minimal = false, multiple = false;
 
-    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
-        if (iter != interval->usesBegin())
+    for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+        if (iter != range->usesBegin())
             multiple = true;
         LUse* use = iter->use;
 
         switch (use->policy()) {
           case LUse::FIXED:
             if (fixed)
                 return false;
             fixed = true;
-            if (minimalUse(interval, insData[iter->pos]))
+            if (minimalUse(range, insData[iter->pos]))
                 minimal = true;
             break;
 
           case LUse::REGISTER:
-            if (minimalUse(interval, insData[iter->pos]))
+            if (minimalUse(range, insData[iter->pos]))
                 minimal = true;
             break;
 
           default:
             break;
         }
     }
 
-    // If an interval contains a fixed use and at least one other use,
-    // splitAtAllRegisterUses will split each use into a different interval.
+    // If a range contains a fixed use and at least one other use, splitting
+    // at all register uses will place each use in a different bundle.
     if (multiple && fixed)
         minimal = false;
 
     if (pfixed)
         *pfixed = fixed;
     return minimal;
 }
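For illustration (hypothetical instruction): a bundle whose only range is
[inputOf(ins), outputOf(ins).next()) and which carries a single LUse::REGISTER use at ins is minimal;
if the range also carried a second, fixed use, the bundle would no longer count as minimal, since
splitting at all register uses would separate the two uses into different bundles.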
 
 size_t
-BacktrackingAllocator::computeSpillWeight(const LiveInterval* interval)
+BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle)
 {
-    // Minimal intervals have an extremely high spill weight, to ensure they
-    // can evict any other intervals and be allocated to a register.
+    // Minimal bundles have an extremely high spill weight, to ensure they
+    // can evict any other bundles and be allocated to a register.
     bool fixed;
-    if (minimalInterval(interval, &fixed))
+    if (minimalBundle(bundle, &fixed))
         return fixed ? 2000000 : 1000000;
 
     size_t usesTotal = 0;
 
-    if (interval->index() == 0) {
-        VirtualRegister* reg = &vregs[interval->vreg()];
-        if (reg->def()->policy() == LDefinition::FIXED && reg->def()->output()->isRegister())
-            usesTotal += 2000;
-        else if (!reg->ins()->isPhi())
-            usesTotal += 2000;
-    }
-
-    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
-        LUse* use = iter->use;
-
-        switch (use->policy()) {
-          case LUse::ANY:
-            usesTotal += 1000;
-            break;
-
-          case LUse::REGISTER:
-          case LUse::FIXED:
-            usesTotal += 2000;
-            break;
-
-          case LUse::KEEPALIVE:
-            break;
-
-          default:
-            // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
-            MOZ_CRASH("Bad use");
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        if (range->hasDefinition()) {
+            VirtualRegister& reg = vregs[range->vreg()];
+            if (reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister())
+                usesTotal += 2000;
+            else if (!reg.ins()->isPhi())
+                usesTotal += 2000;
+        }
+
+        for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
+            LUse* use = iter->use;
+
+            switch (use->policy()) {
+              case LUse::ANY:
+                usesTotal += 1000;
+                break;
+
+              case LUse::REGISTER:
+              case LUse::FIXED:
+                usesTotal += 2000;
+                break;
+
+              case LUse::KEEPALIVE:
+                break;
+
+              default:
+                // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
+                MOZ_CRASH("Bad use");
+            }
         }
     }
 
-    // Intervals for registers in groups get higher weights.
-    if (interval->hint()->kind() != Requirement::NONE)
-        usesTotal += 2000;
-
     // Compute spill weight as a use density, lowering the weight for long
-    // lived intervals with relatively few uses.
-    size_t lifetimeTotal = computePriority(interval);
+    // lived bundles with relatively few uses.
+    size_t lifetimeTotal = computePriority(bundle);
     return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
 }
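Worked example (hypothetical bundle): a non-minimal bundle whose ranges span 100 code positions,
containing one non-phi definition (2000) and one LUse::REGISTER use (2000), gets spill weight
4000 / 100 = 40; a minimal bundle with a fixed-register requirement gets 2000000 regardless, so it
can evict ordinary bundles but is effectively never evicted itself.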
 
 size_t
-BacktrackingAllocator::computeSpillWeight(const VirtualRegisterGroup* group)
+BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles)
 {
     size_t maxWeight = 0;
-    for (size_t j = 0; j < group->registers.length(); j++) {
-        uint32_t vreg = group->registers[j];
-        maxWeight = Max(maxWeight, computeSpillWeight(vregs[vreg].getInterval(0)));
-    }
-    return maxWeight;
-}
-
-size_t
-BacktrackingAllocator::maximumSpillWeight(const LiveIntervalVector& intervals)
-{
-    size_t maxWeight = 0;
-    for (size_t i = 0; i < intervals.length(); i++)
-        maxWeight = Max(maxWeight, computeSpillWeight(intervals[i]));
+    for (size_t i = 0; i < bundles.length(); i++)
+        maxWeight = Max(maxWeight, computeSpillWeight(bundles[i]));
     return maxWeight;
 }
 
 bool
-BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval* interval, bool* success)
+BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success)
 {
-    // If this interval has portions that are hot and portions that are cold,
+    // If this bundle has portions that are hot and portions that are cold,
     // split it at the boundaries between hot and cold code.
 
-    const LiveInterval::Range* hotRange = nullptr;
-
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        AllocatedRange range(interval, interval->getRange(i)), existing;
-        if (hotcode.contains(range, &existing)) {
-            hotRange = existing.range;
+    LiveRange* hotRange = nullptr;
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (hotcode.contains(range, &hotRange))
             break;
-        }
     }
 
-    // Don't split if there is no hot code in the interval.
+    // Don't split if there is no hot code in the bundle.
     if (!hotRange) {
-        JitSpew(JitSpew_RegAlloc, "  interval does not contain hot code");
+        JitSpew(JitSpew_RegAlloc, "  bundle does not contain hot code");
         return true;
     }
 
-    // Don't split if there is no cold code in the interval.
+    // Don't split if there is no cold code in the bundle.
     bool coldCode = false;
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        if (!hotRange->contains(interval->getRange(i))) {
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        if (!hotRange->contains(range)) {
             coldCode = true;
             break;
         }
     }
     if (!coldCode) {
-        JitSpew(JitSpew_RegAlloc, "  interval does not contain cold code");
+        JitSpew(JitSpew_RegAlloc, "  bundle does not contain cold code");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split across hot range %s", hotRange->toString());
 
     // Tweak the splitting method when compiling asm.js code to look at actual
     // uses within the hot/cold code. This heuristic is in place as the below
     // mechanism regresses several asm.js tests. Hopefully this will be fixed
     // soon and this special case removed. See bug 948838.
     if (compilingAsmJS()) {
         SplitPositionVector splitPositions;
-        if (!splitPositions.append(hotRange->from) || !splitPositions.append(hotRange->to))
+        if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to()))
             return false;
         *success = true;
-        return splitAt(interval, splitPositions);
+        return splitAt(bundle, splitPositions);
     }
 
-    LiveInterval* hotInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-    LiveInterval* preInterval = nullptr;
-    LiveInterval* postInterval = nullptr;
-
-    // Accumulate the ranges of hot and cold code in the interval. Note that
+    LiveBundle* hotBundle = LiveBundle::New(alloc(), bundle->spillSet(), bundle->spillParent());
+    if (!hotBundle)
+        return false;
+    LiveBundle* preBundle = nullptr;
+    LiveBundle* postBundle = nullptr;
+
+    // Accumulate the ranges of hot and cold code in the bundle. Note that
     // we are only comparing with the single hot range found, so the cold code
     // may contain separate hot ranges.
-    Vector<LiveInterval::Range, 1, SystemAllocPolicy> hotList, coldList;
-    for (size_t i = 0; i < interval->numRanges(); i++) {
-        LiveInterval::Range hot, coldPre, coldPost;
-        interval->getRange(i)->intersect(hotRange, &coldPre, &hot, &coldPost);
-
-        if (!hot.empty() && !hotInterval->addRange(hot.from, hot.to))
-            return false;
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+        LiveRange::Range hot, coldPre, coldPost;
+        range->intersect(hotRange, &coldPre, &hot, &coldPost);
+
+        if (!hot.empty()) {
+            if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to))
+                return false;
+        }
 
         if (!coldPre.empty()) {
-            if (!preInterval)
-                preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-            if (!preInterval->addRange(coldPre.from, coldPre.to))
+            if (!preBundle) {
+                preBundle = LiveBundle::New(alloc(), bundle->spillSet(), bundle->spillParent());
+                if (!preBundle)
+                    return false;
+            }
+            if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
                 return false;
         }
 
         if (!coldPost.empty()) {
-            if (!postInterval)
-                postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
-            if (!postInterval->addRange(coldPost.from, coldPost.to))
+            if (!postBundle) {
+                postBundle = LiveBundle::New(alloc(), bundle->spillSet(), bundle->spillParent());
+                if (!postBundle)
+                    return false;
+            }
+            if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
                 return false;
         }
     }
 
-    MOZ_ASSERT(preInterval || postInterval);
-    MOZ_ASSERT(hotInterval->numRanges());
-
-    LiveIntervalVector newIntervals;
-    if (!newIntervals.append(hotInterval))
+    MOZ_ASSERT(preBundle || postBundle);
+    MOZ_ASSERT(hotBundle->numRanges() != 0);
+
+    LiveBundleVector newBundles;
+    if (!newBundles.append(hotBundle))
         return false;
-    if (preInterval && !newIntervals.append(preInterval))
+    if (preBundle && !newBundles.append(preBundle))
         return false;
-    if (postInterval && !newIntervals.append(postInterval))
+    if (postBundle && !newBundles.append(postBundle))
         return false;
 
-    distributeUses(interval, newIntervals);
-
     *success = true;
-    return split(interval, newIntervals) && requeueIntervals(newIntervals);
+    return splitAndRequeueBundles(bundle, newBundles);
 }
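For illustration (hypothetical positions): a bundle with the single range [10,100) whose stretch
[40,60) lies inside hot code (say, a loop body) is split into a hot bundle covering [40,60), a pre
bundle covering [10,40), and a post bundle covering [60,100); all three share the original bundle's
spill set, so they can still be spilled to one common location if needed.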
 
 bool
-BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success)
+BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
+                                                    bool* success)
 {
-    // If this interval's later uses do not require it to be in a register,
+    // If this bundle's later uses do not require it to be in a register,
     // split it after the last use which does require a register. If conflict
     // is specified, only consider register uses before the conflict starts.
 
     CodePosition lastRegisterFrom, lastRegisterTo, lastUse;
 
-    // If the definition of the interval is in a register, consider that a
-    // register use too for our purposes here.
-    if (isRegisterDefinition(interval)) {
-        CodePosition spillStart = minimalDefEnd(insData[interval->start()]).next();
-        if (!conflict || spillStart < conflict->start()) {
-            lastUse = lastRegisterFrom = interval->start();
-            lastRegisterTo = spillStart;
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        // If the range defines a register, consider that a register use for
+        // our purposes here.
+        if (isRegisterDefinition(range)) {
+            CodePosition spillStart = minimalDefEnd(insData[range->from()]).next();
+            if (!conflict || spillStart < conflict->firstRange()->from()) {
+                lastUse = lastRegisterFrom = range->from();
+                lastRegisterTo = spillStart;
+            }
         }
-    }
-
-    for (UsePositionIterator iter(interval->usesBegin());
-         iter != interval->usesEnd();
-         iter++)
-    {
-        LUse* use = iter->use;
-        LNode* ins = insData[iter->pos];
-
-        // Uses in the interval should be sorted.
-        MOZ_ASSERT(iter->pos >= lastUse);
-        lastUse = inputOf(ins);
-
-        if (!conflict || outputOf(ins) < conflict->start()) {
-            if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
-                lastRegisterFrom = inputOf(ins);
-                lastRegisterTo = iter->pos.next();
+
+        for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+            LUse* use = iter->use;
+            LNode* ins = insData[iter->pos];
+
+            // Uses in the bundle should be sorted.
+            MOZ_ASSERT(iter->pos >= lastUse);
+            lastUse = inputOf(ins);
+
+            if (!conflict || outputOf(ins) < conflict->firstRange()->from()) {
+                if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
+                    lastRegisterFrom = inputOf(ins);
+                    lastRegisterTo = iter->pos.next();
+                }
             }
         }
     }
 
     // Can't trim non-register uses off the end by splitting.
     if (!lastRegisterFrom.bits()) {
-        JitSpew(JitSpew_RegAlloc, "  interval has no register uses");
+        JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
         return true;
     }
-    if (lastRegisterFrom == lastUse) {
-        JitSpew(JitSpew_RegAlloc, "  interval's last use is a register use");
+    if (lastUse < lastRegisterTo) {
+        JitSpew(JitSpew_RegAlloc, "  bundle's last use is a register use");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split after last register use at %u",
             lastRegisterTo.bits());
 
     SplitPositionVector splitPositions;
     if (!splitPositions.append(lastRegisterTo))
         return false;
     *success = true;
-    return splitAt(interval, splitPositions);
+    return splitAt(bundle, splitPositions);
 }
 
 bool
-BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success)
+BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success)
 {
-    // If this interval's earlier uses do not require it to be in a register,
+    // If this bundle's earlier uses do not require it to be in a register,
     // split it before the first use which does require a register. If conflict
     // is specified, only consider register uses after the conflict ends.
 
-    if (isRegisterDefinition(interval)) {
-        JitSpew(JitSpew_RegAlloc, "  interval is defined by a register");
+    if (isRegisterDefinition(bundle->firstRange())) {
+        JitSpew(JitSpew_RegAlloc, "  bundle is defined by a register");
         return true;
     }
-    if (interval->index() != 0) {
-        JitSpew(JitSpew_RegAlloc, "  interval is not defined in memory");
+    if (!bundle->firstRange()->hasDefinition()) {
+        JitSpew(JitSpew_RegAlloc, "  bundle does not have definition");
         return true;
     }
 
     CodePosition firstRegisterFrom;
 
-    for (UsePositionIterator iter(interval->usesBegin());
-         iter != interval->usesEnd();
-         iter++)
-    {
-        LUse* use = iter->use;
-        LNode* ins = insData[iter->pos];
-
-        if (!conflict || outputOf(ins) >= conflict->end()) {
-            if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
-                firstRegisterFrom = inputOf(ins);
-                break;
+    CodePosition conflictEnd;
+    if (conflict) {
+        for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+            if (range->to() > conflictEnd)
+                conflictEnd = range->to();
+        }
+    }
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
+            LUse* use = iter->use;
+            LNode* ins = insData[iter->pos];
+
+            if (!conflict || outputOf(ins) >= conflictEnd) {
+                if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
+                    firstRegisterFrom = inputOf(ins);
+                    break;
+                }
             }
         }
     }
 
     if (!firstRegisterFrom.bits()) {
         // Can't trim non-register uses off the beginning by splitting.
-        JitSpew(JitSpew_RegAlloc, "  interval has no register uses");
+        JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split before first register use at %u",
             firstRegisterFrom.bits());
 
     SplitPositionVector splitPositions;
     if (!splitPositions.append(firstRegisterFrom))
         return false;
     *success = true;
-    return splitAt(interval, splitPositions);
+    return splitAt(bundle, splitPositions);
 }
 
-bool
-BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval* interval)
+// When splitting a bundle according to a list of split positions, return
+// whether a use or range at |pos| should go in a different bundle than the
+// one used for the previous position this was called with.
+static bool
+UseNewBundle(const SplitPositionVector& splitPositions, CodePosition pos,
+             size_t* activeSplitPosition)
 {
-    // Split this interval so that all its register uses become minimal
-    // intervals and allow the vreg to be spilled throughout its range.
-
-    LiveIntervalVector newIntervals;
-    uint32_t vreg = interval->vreg();
-
-    JitSpew(JitSpew_RegAlloc, "  split at all register uses");
-
-    // If this LiveInterval is the result of an earlier split which created a
-    // spill interval, that spill interval covers the whole range, so we don't
-    // need to create a new one.
-    bool spillIntervalIsNew = false;
-    LiveInterval* spillInterval = interval->spillInterval();
-    if (!spillInterval) {
-        spillInterval = LiveInterval::New(alloc(), vreg, 0);
-        spillIntervalIsNew = true;
+    if (splitPositions.empty()) {
+        // When the split positions are empty we are splitting at all uses.
+        return true;
     }
 
-    CodePosition spillStart = interval->start();
-    if (isRegisterDefinition(interval)) {
-        // Treat the definition of the interval as a register use so that it
-        // can be split and spilled ASAP.
-        CodePosition from = interval->start();
-        CodePosition to = minimalDefEnd(insData[from]).next();
-        if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
-            return false;
-        spillStart = to;
+    if (*activeSplitPosition == splitPositions.length()) {
+        // We've advanced past all split positions.
+        return false;
     }
 
-    if (spillIntervalIsNew) {
-        for (size_t i = 0; i < interval->numRanges(); i++) {
-            const LiveInterval::Range* range = interval->getRange(i);
-            CodePosition from = Max(range->from, spillStart);
-            if (!spillInterval->addRange(from, range->to))
-                return false;
-        }
+    if (splitPositions[*activeSplitPosition] > pos) {
+        // We haven't gotten to the next split position yet.
+        return false;
     }
 
-    for (UsePositionIterator iter(interval->usesBegin());
-         iter != interval->usesEnd();
-         iter++)
+    // We've advanced past the next split position, find the next one which we
+    // should split at.
+    while (*activeSplitPosition < splitPositions.length() &&
+           splitPositions[*activeSplitPosition] <= pos)
     {
-        LNode* ins = insData[iter->pos];
-        if (iter->pos < spillStart) {
-            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-        } else if (isRegisterUse(iter->use, ins)) {
-            // For register uses which are not useRegisterAtStart, pick an
-            // interval that covers both the instruction's input and output, so
-            // that the register is not reused for an output.
-            CodePosition from = inputOf(ins);
-            CodePosition to = iter->use->usedAtStart() ? outputOf(ins) : iter->pos.next();
-
-            // Use the same interval for duplicate use positions, except when
-            // the uses are fixed (they may require incompatible registers).
-            if (newIntervals.empty() ||
-                newIntervals.back()->end() != to ||
-                newIntervals.back()->usesBegin()->use->policy() == LUse::FIXED ||
-                iter->use->policy() == LUse::FIXED)
-            {
-                if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
-                    return false;
-            }
-
-            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-        } else {
-            MOZ_ASSERT(spillIntervalIsNew);
-            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-        }
+        (*activeSplitPosition)++;
+    }
+    return true;
+}
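Worked example (hypothetical positions): with splitPositions = {20, 50} and *activeSplitPosition
starting at 0, successive calls at positions 10, 25, 30, 60 and 70 return false, true, false, true
and false; each true indicates that at least one split point was crossed since the previous call, so
the caller starts a new bundle there. An empty splitPositions list makes every call return true,
which is how splitting at all register uses is expressed.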
+
+static bool
+HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
+{
+    MOZ_ASSERT(range->bundle() == bundle);
+
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* prevRange = LiveRange::get(*iter);
+        if (prevRange == range)
+            return false;
+        if (prevRange->vreg() == range->vreg())
+            return true;
     }
 
-    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
-        return false;
-
-    return split(interval, newIntervals) && requeueIntervals(newIntervals);
+    MOZ_CRASH();
 }
 
-// Find the next split position after the current position.
-static size_t NextSplitPosition(size_t activeSplitPosition,
-                                const SplitPositionVector& splitPositions,
-                                CodePosition currentPos)
+static bool
+HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
 {
-    while (activeSplitPosition < splitPositions.length() &&
-           splitPositions[activeSplitPosition] <= currentPos)
-    {
-        ++activeSplitPosition;
+    MOZ_ASSERT(range->bundle() == bundle);
+
+    bool foundRange = false;
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* prevRange = LiveRange::get(*iter);
+        if (foundRange && prevRange->vreg() == range->vreg())
+            return true;
+        if (prevRange == range)
+            foundRange = true;
     }
-    return activeSplitPosition;
-}
-
-// Test whether the current position has just crossed a split point.
-static bool SplitHere(size_t activeSplitPosition,
-                      const SplitPositionVector& splitPositions,
-                      CodePosition currentPos)
-{
-    return activeSplitPosition < splitPositions.length() &&
-           currentPos >= splitPositions[activeSplitPosition];
+
+    MOZ_ASSERT(foundRange);
+    return false;
 }
 
 bool
-BacktrackingAllocator::splitAt(LiveInterval* interval,
-                               const SplitPositionVector& splitPositions)
+BacktrackingAllocator::splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions)
 {
-    // Split the interval at the given split points. Unlike splitAtAllRegisterUses,
-    // consolidate any register uses which have no intervening split points into the
-    // same resulting interval.
-
-    // splitPositions should be non-empty and sorted.
-    MOZ_ASSERT(!splitPositions.empty());
+    // Split the bundle at the given split points. Register uses which have no
+    // intervening split points are consolidated into the same bundle. If the
+    // list of split points is empty, then all register uses are placed in
+    // minimal bundles.
+
+    // splitPositions should be sorted.
     for (size_t i = 1; i < splitPositions.length(); ++i)
         MOZ_ASSERT(splitPositions[i-1] < splitPositions[i]);
 
-    // Don't spill the interval until after the end of its definition.
-    CodePosition spillStart = interval->start();
-    if (isRegisterDefinition(interval))
-        spillStart = minimalDefEnd(insData[interval->start()]).next();
-
-    uint32_t vreg = interval->vreg();
-
-    // If this LiveInterval is the result of an earlier split which created a
-    // spill interval, that spill interval covers the whole range, so we don't
-    // need to create a new one.
-    bool spillIntervalIsNew = false;
-    LiveInterval* spillInterval = interval->spillInterval();
-    if (!spillInterval) {
-        spillInterval = LiveInterval::New(alloc(), vreg, 0);
-        spillIntervalIsNew = true;
-
-        for (size_t i = 0; i < interval->numRanges(); i++) {
-            const LiveInterval::Range* range = interval->getRange(i);
-            CodePosition from = Max(range->from, spillStart);
-            if (!spillInterval->addRange(from, range->to))
-                return false;
+    // We don't need to create a new spill bundle if there already is one.
+    bool spillBundleIsNew = false;
+    LiveBundle* spillBundle = bundle->spillParent();
+    if (!spillBundle) {
+        spillBundle = LiveBundle::New(alloc(), bundle->spillSet(), nullptr);
+        if (!spillBundle)
+            return false;
+        spillBundleIsNew = true;
+
+        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            CodePosition from = range->from();
+            if (isRegisterDefinition(range))
+                from = minimalDefEnd(insData[from]).next();
+
+            if (from < range->to()) {
+                if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to()))
+                    return false;
+
+                if (range->hasDefinition() && !isRegisterDefinition(range))
+                    spillBundle->lastRange()->setHasDefinition();
+            }
         }
     }
 
-    LiveIntervalVector newIntervals;
-
-    CodePosition lastRegisterUse;
-    if (spillStart != interval->start()) {
-        LiveInterval* newInterval = LiveInterval::New(alloc(), vreg, 0);
-        newInterval->setSpillInterval(spillInterval);
-        if (!newIntervals.append(newInterval))
+    LiveBundleVector newBundles;
+
+    // The bundle which ranges are currently being added to.
+    LiveBundle* activeBundle = LiveBundle::New(alloc(), bundle->spillSet(), spillBundle);
+    if (!activeBundle || !newBundles.append(activeBundle))
+        return false;
+
+    // State for use by UseNewBundle.
+    size_t activeSplitPosition = 0;
+
+    // Make new bundles according to the split positions, and distribute ranges
+    // and uses to them.
+    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
+        LiveRange* range = LiveRange::get(*iter);
+
+        if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) {
+            activeBundle = LiveBundle::New(alloc(), bundle->spillSet(), spillBundle);
+            if (!activeBundle || !newBundles.append(activeBundle))
+                return false;
+        }
+
+        LiveRange* activeRange = LiveRange::New(alloc(), range->vreg(), range->from(), range->to());
+        if (!activeRange)
             return false;
-        lastRegisterUse = interval->start();
-    }
-
-    size_t activeSplitPosition = NextSplitPosition(0, splitPositions, interval->start());
-    for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) {
-        LNode* ins = insData[iter->pos];
-        if (iter->pos < spillStart) {
-            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-            activeSplitPosition = NextSplitPosition(activeSplitPosition, splitPositions, iter->pos);
-        } else if (isRegisterUse(iter->use, ins)) {
-            if (lastRegisterUse.bits() == 0 ||
-                SplitHere(activeSplitPosition, splitPositions, iter->pos))
-            {
-                // Place this register use into a different interval from the
+        activeBundle->addRange(activeRange);
+
+        if (isRegisterDefinition(range))
+            activeRange->setHasDefinition();
+
+        while (range->hasUses()) {
+            UsePosition* use = range->popUse();
+            LNode* ins = insData[use->pos];
+
+            // Any uses of a register that appear before its definition has
+            // finished must be associated with the range for that definition.
+            if (isRegisterDefinition(range) && use->pos <= minimalDefEnd(insData[range->from()])) {
+                activeRange->addUse(use);
+            } else if (isRegisterUse(use->use, ins)) {
+                // Place this register use into a different bundle from the
                 // last one if there are any split points between the two uses.
-                LiveInterval* newInterval = LiveInterval::New(alloc(), vreg, 0);
-                newInterval->setSpillInterval(spillInterval);
-                if (!newIntervals.append(newInterval))
-                    return false;
-                activeSplitPosition = NextSplitPosition(activeSplitPosition,
-                                                        splitPositions,
-                                                        iter->pos);
+                // UseNewBundle always returns true if we are splitting at all
+                // register uses, but we can still reuse the last range and
+                // bundle if they have uses at the same position, except when
+                // either use is fixed (the two uses might require incompatible
+                // registers).
+                if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) &&
+                    (!activeRange->hasUses() ||
+                     activeRange->usesBegin()->pos != use->pos ||
+                     activeRange->usesBegin()->use->policy() == LUse::FIXED ||
+                     use->use->policy() == LUse::FIXED))
+                {
+                    activeBundle = LiveBundle::New(alloc(), bundle->spillSet(), spillBundle);
+                    if (!newBundles.append(activeBundle))
+                        return false;
+                    activeRange = LiveRange::New(alloc(), range->vreg(), range->from(), range->to());
+                    if (!activeRange)
+                        return false;
+                    activeBundle->addRange(activeRange);
+                }
+
+                activeRange->addUse(use);
+            } else {
+                MOZ_ASSERT(spillBundleIsNew);
+                spillBundle->rangeFor(use->pos)->addUse(use);
             }
-            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
-            lastRegisterUse = iter->pos;
-        } else {
-            MOZ_ASSERT(spillIntervalIsNew);
-            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
         }
     }
 
-    // Compute ranges for each new interval that cover all its uses.
-    size_t activeRange = interval->numRanges();
-    for (size_t i = 0; i < newIntervals.length(); i++) {
-        LiveInterval* newInterval = newIntervals[i];
-        CodePosition start, end;
-        if (i == 0 && spillStart != interval->start()) {
-            start = interval->start();
-            if (newInterval->usesEmpty())
-                end = spillStart;
-            else
-                end = newInterval->usesBack()->pos.next();
-        } else {
-            start = inputOf(insData[newInterval->usesBegin()->pos]);
-            end = newInterval->usesBack()->pos.next();
+    LiveBundleVector filteredBundles;
+
+    // Trim the ends of ranges in each new bundle when there are no other
+    // earlier or later ranges in the same bundle with the same vreg.
+    for (size_t i = 0; i < newBundles.length(); i++) {
+        LiveBundle* bundle = newBundles[i];
+
+        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; ) {
+            LiveRange* range = LiveRange::get(*iter);
+
+            if (!range->hasDefinition()) {
+                if (!HasPrecedingRangeSharingVreg(bundle, range)) {
+                    if (range->hasUses()) {
+                        UsePosition* use = *range->usesBegin();
+                        range->setFrom(inputOf(insData[use->pos]));
+                    } else {
+                        bundle->removeRangeAndIncrementIterator(iter);
+                        continue;
+                    }
+                }
+            }
+
+            if (!HasFollowingRangeSharingVreg(bundle, range)) {
+                if (range->hasUses()) {
+                    UsePosition* use = range->lastUse();
+                    range->setTo(use->pos.next());
+                } else if (range->hasDefinition()) {
+                    range->setTo(minimalDefEnd(insData[range->from()]).next());
+                } else {
+                    bundle->removeRangeAndIncrementIterator(iter);
+                    continue;
+                }
+            }
+
+            iter++;
         }
-        for (; activeRange > 0; --activeRange) {
-            const LiveInterval::Range* range = interval->getRange(activeRange - 1);
-            if (range->to <= start)
-                continue;
-            if (range->from >= end)
-                break;
-            if (!newInterval->addRange(Max(range->from, start),
-                                       Min(range->to, end)))
-                return false;
-            if (range->to >= end)
-                break;
-        }
+
+        if (bundle->hasRanges() && !filteredBundles.append(bundle))
+            return false;
     }
 
-    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
+    if (spillBundleIsNew && !filteredBundles.append(spillBundle))
         return false;
 
-    return split(interval, newIntervals) && requeueIntervals(newIntervals);
+    return splitAndRequeueBundles(bundle, filteredBundles);
 }
 
 bool
-BacktrackingAllocator::splitAcrossCalls(LiveInterval* interval)
+BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle)
 {
-    // Split the interval to separate register uses and non-register uses and
+    // Split the bundle to separate register uses and non-register uses and
     // allow the vreg to be spilled across its range.
 
-    // Find the locations of all calls in the interval's range. Fixed intervals
-    // are introduced by buildLivenessInfo only for calls when allocating for
-    // the backtracking allocator. fixedIntervalsUnion is sorted backwards, so
-    // iterate through it backwards.
+    // Find the locations of all calls in the bundle's range.
     SplitPositionVector callPositions;
-    for (size_t i = fixedIntervalsUnion->numRanges(); i > 0; i--) {
-        const LiveInterval::Range* range = fixedIntervalsUnion->getRange(i - 1);
-        if (interval->covers(range->from) && interval->covers(range->from.previous())) {
-            if (!callPositions.append(range->from))
+    for (LiveRange::BundleLinkIterator iter = callRanges->rangesBegin(); iter; iter++) {
+        LiveRange* callRange = LiveRange::get(*iter);
+        if (bundle->rangeFor(callRange->from()) && bundle->rangeFor(callRange->from().previous())) {
+            if (!callPositions.append(callRange->from()))
                 return false;
         }
     }
     MOZ_ASSERT(callPositions.length());
 
 #ifdef DEBUG
     JitSpewStart(JitSpew_RegAlloc, "  split across calls at ");
-    for (size_t i = 0; i < callPositions.length(); ++i) {
+    for (size_t i = 0; i < callPositions.length(); ++i)
         JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].bits());
-    }
     JitSpewFin(JitSpew_RegAlloc);
 #endif
 
-    return splitAt(interval, callPositions);
+    return splitAt(bundle, callPositions);
 }
 
 bool
-BacktrackingAllocator::chooseIntervalSplit(LiveInterval* interval, bool fixed, LiveInterval* conflict)
+BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict)
 {
     bool success = false;
 
-    if (!trySplitAcrossHotcode(interval, &success))
+    if (!trySplitAcrossHotcode(bundle, &success))
         return false;
     if (success)
         return true;
 
     if (fixed)
-        return splitAcrossCalls(interval);
-
-    if (!trySplitBeforeFirstRegisterUse(interval, conflict, &success))
+        return splitAcrossCalls(bundle);
+
+    if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success))
         return false;
     if (success)
         return true;
 
-    if (!trySplitAfterLastRegisterUse(interval, conflict, &success))
+    if (!trySplitAfterLastRegisterUse(bundle, conflict, &success))
         return false;
     if (success)
         return true;
 
-    return splitAtAllRegisterUses(interval);
+    // Split at all register uses.
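+    // Passing an empty vector of split positions tells splitAt (via
+    // UseNewBundle) to split at every register use.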
+    SplitPositionVector emptyPositions;
+    return splitAt(bundle, emptyPositions);
 }
--- a/js/src/jit/BacktrackingAllocator.h
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -6,289 +6,734 @@
 
 #ifndef jit_BacktrackingAllocator_h
 #define jit_BacktrackingAllocator_h
 
 #include "mozilla/Array.h"
 
 #include "ds/PriorityQueue.h"
 #include "ds/SplayTree.h"
-#include "jit/LiveRangeAllocator.h"
+#include "jit/RegisterAllocator.h"
+#include "jit/StackSlotAllocator.h"
 
 // Backtracking priority queue based register allocator based on that described
 // in the following blog post:
 //
 // http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
 
 namespace js {
 namespace jit {
 
-// Information about a group of registers. Registers may be grouped together
-// when (a) all of their lifetimes are disjoint, (b) they are of the same type
-// (double / non-double) and (c) it is desirable that they have the same
-// allocation.
-struct VirtualRegisterGroup : public TempObject
+class Requirement
 {
-    // All virtual registers in the group.
-    Vector<uint32_t, 2, JitAllocPolicy> registers;
+  public:
+    enum Kind {
+        NONE,
+        REGISTER,
+        FIXED,
+        MUST_REUSE_INPUT
+    };
+
+    Requirement()
+      : kind_(NONE)
+    { }
+
+    explicit Requirement(Kind kind)
+      : kind_(kind)
+    {
+        // These have dedicated constructors.
+        MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
+    }
+
+    Requirement(Kind kind, CodePosition at)
+      : kind_(kind),
+        position_(at)
+    {
+        // These have dedicated constructors.
+        MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
+    }
 
-    // Desired physical register to use for registers in the group.
-    LAllocation allocation;
+    explicit Requirement(LAllocation fixed)
+      : kind_(FIXED),
+        allocation_(fixed)
+    {
+        MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+    }
+
+    // Only useful as a hint; encodes where the fixed requirement is used, so
+    // that the fixed register is not allocated too early.
+    Requirement(LAllocation fixed, CodePosition at)
+      : kind_(FIXED),
+        allocation_(fixed),
+        position_(at)
+    {
+        MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
+    }
+
+    Requirement(uint32_t vreg, CodePosition at)
+      : kind_(MUST_REUSE_INPUT),
+        allocation_(LUse(vreg, LUse::ANY)),
+        position_(at)
+    { }
+
+    Kind kind() const {
+        return kind_;
+    }
 
-    // Spill location to be shared by registers in the group.
-    LAllocation spill;
+    LAllocation allocation() const {
+        MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
+        return allocation_;
+    }
+
+    uint32_t virtualRegister() const {
+        MOZ_ASSERT(allocation_.isUse());
+        MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
+        return allocation_.toUse()->virtualRegister();
+    }
+
+    CodePosition pos() const {
+        return position_;
+    }
+
+    int priority() const;
+
+    bool merge(const Requirement& newRequirement) {
+        // Merge newRequirement with any existing requirement, returning false
+        // if the new and old requirements conflict.
+        MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
+
+        if (newRequirement.kind() == Requirement::FIXED) {
+            if (kind() == Requirement::FIXED)
+                return newRequirement.allocation() == allocation();
+            *this = newRequirement;
+            return true;
+        }
 
-    explicit VirtualRegisterGroup(TempAllocator& alloc)
-      : registers(alloc), allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
-    {}
+        MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
+        if (kind() == Requirement::FIXED)
+            return allocation().isRegister();
+
+        *this = newRequirement;
+        return true;
+    }
+
+    void dump() const;
+
+  private:
+    Kind kind_;
+    LAllocation allocation_;
+    CodePosition position_;
+};
 
-    uint32_t canonicalReg() {
-        uint32_t minimum = registers[0];
-        for (size_t i = 1; i < registers.length(); i++)
-            minimum = Min(minimum, registers[i]);
-        return minimum;
+struct UsePosition : public TempObject,
+                     public InlineForwardListNode<UsePosition>
+{
+    LUse* use;
+    CodePosition pos;
+
+    UsePosition(LUse* use, CodePosition pos) :
+        use(use),
+        pos(pos)
+    {
+        // Verify that the usedAtStart() flag is consistent with the
+        // subposition. For now ignore fixed registers, because they
+        // are handled specially around calls.
+        MOZ_ASSERT_IF(!use->isFixedRegister(),
+                      pos.subpos() == (use->usedAtStart()
+                                       ? CodePosition::INPUT
+                                       : CodePosition::OUTPUT));
     }
 };
 
-class BacktrackingVirtualRegister : public VirtualRegister
+typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
+
+// Backtracking allocator data structures overview.
+//
+// LiveRange: A continuous range of positions where a virtual register is live.
+// LiveBundle: A set of LiveRanges which do not overlap.
+// VirtualRegister: A set of all LiveRanges used for some LDefinition.
+//
+// The allocator first performs a liveness analysis on the LIR graph, which
+// constructs LiveRanges for each VirtualRegister and determines where the
+// registers are live.
+//
+// The ranges are then bundled together according to heuristics, and placed on
+// the allocation queue.
+//
+// As bundles are removed from the allocation queue, we attempt to find a
+// physical register or stack slot allocation for all ranges in the removed
+// bundle, possibly evicting already-allocated bundles. See processBundle()
+// for details.
+//
+// If we are not able to allocate a bundle, it is split according to heuristics
+// into two or more smaller bundles which cover all the ranges of the original.
+// These smaller bundles are then allocated independently.
+
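+// Roughly, the driving loop looks like the following sketch (error handling
+// and bookkeeping omitted, and the exact PriorityQueue calls may differ; see
+// go() and processBundle() for the real control flow):
+//
+//   while (!allocationQueue.empty()) {
+//       LiveBundle* bundle = allocationQueue.removeHighest().bundle;
+//       if (!processBundle(bundle))  // may evict conflicting bundles or split this one
+//           return false;
+//   }
+//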
+class LiveBundle;
+
+class LiveRange : public TempObject
 {
+  public:
+    // Linked lists are used to keep track of the ranges in each LiveBundle and
+    // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
+    // these auxiliary classes to keep things straight.
+    class BundleLink : public InlineForwardListNode<BundleLink> {};
+    class RegisterLink : public InlineForwardListNode<RegisterLink> {};
+
+    typedef InlineForwardListIterator<BundleLink> BundleLinkIterator;
+    typedef InlineForwardListIterator<RegisterLink> RegisterLinkIterator;
+
+    // Links in the lists in LiveBundle and VirtualRegister.
+    BundleLink bundleLink;
+    RegisterLink registerLink;
+
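+    // Recover the owning LiveRange from one of its embedded list links by
+    // subtracting the link's offset within the object.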
+    static LiveRange* get(BundleLink* link) {
+        return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+                                            offsetof(LiveRange, bundleLink));
+    }
+    static LiveRange* get(RegisterLink* link) {
+        return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
+                                            offsetof(LiveRange, registerLink));
+    }
+
+    struct Range
+    {
+        // The beginning of this range, inclusive.
+        CodePosition from;
+
+        // The end of this range, exclusive.
+        CodePosition to;
+
+        Range() {}
+
+        Range(CodePosition from, CodePosition to)
+          : from(from), to(to)
+        {
+            MOZ_ASSERT(!empty());
+        }
+
+        bool empty() {
+            MOZ_ASSERT(from <= to);
+            return from == to;
+        }
+    };
+
+  private:
+    // The virtual register this range is for, or zero if this does not have a
+    // virtual register (for example, it is in the callRanges bundle).
+    uint32_t vreg_;
+
+    // The bundle containing this range, null if liveness information is being
+    // constructed and we haven't started allocating bundles yet.
+    LiveBundle* bundle_;
+
+    // The code positions in this range.
+    Range range_;
+
+    // All uses of the virtual register in this range, ordered by location.
+    InlineForwardList<UsePosition> uses_;
+
+    // Whether this range contains the virtual register's definition.
+    bool hasDefinition_;
+
+    LiveRange(uint32_t vreg, Range range)
+      : vreg_(vreg), bundle_(nullptr), range_(range), hasDefinition_(false)
+    {
+        MOZ_ASSERT(!range.empty());
+    }
+
+  public:
+    static LiveRange* New(TempAllocator& alloc, uint32_t vreg,
+                          CodePosition from, CodePosition to) {
+        return new(alloc) LiveRange(vreg, Range(from, to));
+    }
+
+    uint32_t vreg() const {
+        MOZ_ASSERT(hasVreg());
+        return vreg_;
+    }
+    bool hasVreg() const {
+        return vreg_ != 0;
+    }
+
+    LiveBundle* bundle() const {
+        return bundle_;
+    }
+
+    CodePosition from() const {
+        return range_.from;
+    }
+    CodePosition to() const {
+        return range_.to;
+    }
+    bool covers(CodePosition pos) const {
+        return pos >= from() && pos < to();
+    }
+
+    // Whether this range wholly contains other.
+    bool contains(LiveRange* other) const;
+
+    // Intersect this range with other, returning the subranges of this
+    // that are before, inside, or after other.
+    void intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const;
+
+    // Whether this range has any intersection with other.
+    bool intersects(LiveRange* other) const;
+
+    UsePositionIterator usesBegin() const {
+        return uses_.begin();
+    }
+    UsePosition* lastUse() const {
+        return uses_.back();
+    }
+    bool hasUses() const {
+        return !!usesBegin();
+    }
+    UsePosition* popUse() {
+        return uses_.popFront();
+    }
+
+    bool hasDefinition() const {
+        return hasDefinition_;
+    }
+
+    void setFrom(CodePosition from) {
+        range_.from = from;
+        MOZ_ASSERT(!range_.empty());
+    }
+    void setTo(CodePosition to) {
+        range_.to = to;
+        MOZ_ASSERT(!range_.empty());
+    }
+
+    void setBundle(LiveBundle* bundle) {
+        bundle_ = bundle;
+    }
+
+    void addUse(UsePosition* use);
+    void distributeUses(LiveRange* other);
+
+    void setHasDefinition() {
+        MOZ_ASSERT(!hasDefinition_);
+        hasDefinition_ = true;
+    }
+
+    // Return a string describing this range. This is not re-entrant!
+#ifdef DEBUG
+    const char* toString() const;
+#else
+    const char* toString() const { return "???"; }
+#endif
+
+    // Comparator for use in range splay trees.
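+    // Overlapping ranges compare as equal, so a splay tree lookup finds any
+    // stored range that intersects the query range.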
+    static int compare(LiveRange* v0, LiveRange* v1) {
+        // LiveRange includes 'from' but excludes 'to'.
+        if (v0->to() <= v1->from())
+            return -1;
+        if (v0->from() >= v1->to())
+            return 1;
+        return 0;
+    }
+};
+
+// Tracks information about bundles that should all be spilled to the same
+// physical location. At the beginning of allocation, each bundle has its own
+// spill set. As bundles are split, the new smaller bundles continue to use the
+// same spill set.
+class SpillSet : public TempObject
+{
+    // All bundles with this spill set which have been spilled. All bundles in
+    // this list will be given the same physical slot.
+    Vector<LiveBundle*, 1, JitAllocPolicy> list_;
+
+    explicit SpillSet(TempAllocator& alloc)
+      : list_(alloc)
+    { }
+
+  public:
+    static SpillSet* New(TempAllocator& alloc) {
+        return new(alloc) SpillSet(alloc);
+    }
+
+    bool addSpilledBundle(LiveBundle* bundle) {
+        return list_.append(bundle);
+    }
+    size_t numSpilledBundles() const {
+        return list_.length();
+    }
+    LiveBundle* spilledBundle(size_t i) const {
+        return list_[i];
+    }
+
+    void setAllocation(LAllocation alloc);
+};
+
+// A set of live ranges which are all pairwise disjoint. The register allocator
+// attempts to find allocations for an entire bundle, and if it fails, the
+// bundle will be broken into smaller ones which are allocated independently.
+class LiveBundle : public TempObject
+{
+    // Set to use if this bundle or one it is split into is spilled.
+    SpillSet* spill_;
+
+    // All the ranges in this set, ordered by location.
+    InlineForwardList<LiveRange::BundleLink> ranges_;
+
+    // Allocation to use for ranges in this set, bogus if unallocated or spilled
+    // and not yet given a physical stack slot.
+    LAllocation alloc_;
+
+    // Bundle which entirely contains this one and has no register uses. This
+    // may or may not be spilled by the allocator, but it can be spilled and
+    // will not be split.
+    LiveBundle* spillParent_;
+
+    LiveBundle(SpillSet* spill, LiveBundle* spillParent)
+      : spill_(spill), spillParent_(spillParent)
+    { }
+
+  public:
+    static LiveBundle* New(TempAllocator& alloc, SpillSet* spill, LiveBundle* spillParent) {
+        return new(alloc) LiveBundle(spill, spillParent);
+    }
+
+    SpillSet* spillSet() const {
+        return spill_;
+    }
+    void setSpillSet(SpillSet* spill) {
+        spill_ = spill;
+    }
+
+    LiveRange::BundleLinkIterator rangesBegin() const {
+        return ranges_.begin();
+    }
+    bool hasRanges() const {
+        return !!rangesBegin();
+    }
+    LiveRange* firstRange() const {
+        return LiveRange::get(*rangesBegin());
+    }
+    LiveRange* lastRange() const {
+        return LiveRange::get(ranges_.back());
+    }
+    LiveRange* rangeFor(CodePosition pos) const;
+    void removeRange(LiveRange* range);
+    void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) {
+        ranges_.removeAndIncrement(iter);
+    }
+    void addRange(LiveRange* range);
+    bool addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to);
+    bool addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
+                                   CodePosition from, CodePosition to);
+    LiveRange* popFirstRange();
+#ifdef DEBUG
+    size_t numRanges() const;
+#endif
+
+    LAllocation allocation() const {
+        return alloc_;
+    }
+    void setAllocation(LAllocation alloc) {
+        alloc_ = alloc;
+    }
+
+    LiveBundle* spillParent() const {
+        return spillParent_;
+    }
+
+    // Return a string describing this bundle. This is not re-entrant!
+#ifdef DEBUG
+    const char* toString() const;
+#else
+    const char* toString() const { return "???"; }
+#endif
+};
+
+// Information about the allocation for a virtual register.
+class VirtualRegister
+{
+    // Instruction which defines this register.
+    LNode* ins_;
+
+    // Definition in the instruction for this register.
+    LDefinition* def_;
+
+    // All live ranges for this register. These may overlap each other, and are
+    // ordered by their start position.
+    InlineForwardList<LiveRange::RegisterLink> ranges_;
+
+    // Whether def_ is a temp or an output.
+    bool isTemp_;
+
     // If this register's definition is MUST_REUSE_INPUT, whether a copy must
     // be introduced before the definition that relaxes the policy.
     bool mustCopyInput_;
 
-    // Spill location to use for this register.
-    LAllocation canonicalSpill_;
-
-    // Code position above which the canonical spill cannot be used; such
-    // intervals may overlap other registers in the same group.
-    CodePosition canonicalSpillExclude_;
-
-    // If this register is associated with a group of other registers,
-    // information about the group. This structure is shared between all
-    // registers in the group.
-    VirtualRegisterGroup* group_;
+    void operator=(const VirtualRegister&) = delete;
+    VirtualRegister(const VirtualRegister&) = delete;
 
   public:
-    explicit BacktrackingVirtualRegister(TempAllocator& alloc)
-      : VirtualRegister(alloc)
-    {}
+    explicit VirtualRegister()
+    {
+        // Note: This class is zeroed before it is constructed.
+    }
+
+    void init(LNode* ins, LDefinition* def, bool isTemp) {
+        MOZ_ASSERT(!ins_);
+        ins_ = ins;
+        def_ = def;
+        isTemp_ = isTemp;
+    }
+
+    LNode* ins() const {
+        return ins_;
+    }
+    LDefinition* def() const {
+        return def_;
+    }
+    LDefinition::Type type() const {
+        return def()->type();
+    }
+    uint32_t vreg() const {
+        return def()->virtualRegister();
+    }
+    bool isCompatible(const AnyRegister& r) const {
+        return def_->isCompatibleReg(r);
+    }
+    bool isCompatible(const VirtualRegister& vr) const {
+        return def_->isCompatibleDef(*vr.def_);
+    }
+    bool isTemp() const {
+        return isTemp_;
+    }
+
     void setMustCopyInput() {
         mustCopyInput_ = true;
     }
     bool mustCopyInput() {
         return mustCopyInput_;
     }
 
-    void setCanonicalSpill(LAllocation alloc) {
-        MOZ_ASSERT(!alloc.isUse());
-        canonicalSpill_ = alloc;
+    LiveRange::RegisterLinkIterator rangesBegin() const {
+        return ranges_.begin();
+    }
+    bool hasRanges() const {
+        return !!rangesBegin();
+    }
+    LiveRange* firstRange() const {
+        return LiveRange::get(*rangesBegin());
     }
-    const LAllocation* canonicalSpill() const {
-        return canonicalSpill_.isBogus() ? nullptr : &canonicalSpill_;
+    LiveRange* lastRange() const {
+        return LiveRange::get(ranges_.back());
+    }
+    LiveRange* rangeFor(CodePosition pos) const;
+    void removeRange(LiveRange* range);
+    void addRange(LiveRange* range);
+
+    LiveBundle* firstBundle() const {
+        return firstRange()->bundle();
     }
 
-    void setCanonicalSpillExclude(CodePosition pos) {
-        canonicalSpillExclude_ = pos;
-    }
-    bool hasCanonicalSpillExclude() const {
-        return canonicalSpillExclude_.bits() != 0;
-    }
-    CodePosition canonicalSpillExclude() const {
-        MOZ_ASSERT(hasCanonicalSpillExclude());
-        return canonicalSpillExclude_;
-    }
-
-    void setGroup(VirtualRegisterGroup* group) {
-        group_ = group;
-    }
-    VirtualRegisterGroup* group() {
-        return group_;
-    }
+    bool addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to);
+    void addInitialUse(UsePosition* use);
+    void setInitialDefinition(CodePosition from);
 };
 
 // A sequence of code positions, for telling BacktrackingAllocator::splitAt
 // where to split.
 typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;
 
-class BacktrackingAllocator
-  : private LiveRangeAllocator<BacktrackingVirtualRegister>
+class BacktrackingAllocator : protected RegisterAllocator
 {
     friend class C1Spewer;
     friend class JSONSpewer;
 
-    // Priority queue element: either an interval or group of intervals and the
-    // associated priority.
+    BitSet* liveIn;
+    FixedList<VirtualRegister> vregs;
+
+    // Ranges where all registers must be spilled due to call instructions.
+    LiveBundle* callRanges;
+
+    // Allocation state.
+    StackSlotAllocator stackSlotAllocator;
+
+    // Priority queue element: a bundle and the associated priority.
     struct QueueItem
     {
-        LiveInterval* interval;
-        VirtualRegisterGroup* group;
+        LiveBundle* bundle;
 
-        QueueItem(LiveInterval* interval, size_t priority)
-          : interval(interval), group(nullptr), priority_(priority)
-        {}
-
-        QueueItem(VirtualRegisterGroup* group, size_t priority)
-          : interval(nullptr), group(group), priority_(priority)
+        QueueItem(LiveBundle* bundle, size_t priority)
+          : bundle(bundle), priority_(priority)
         {}
 
         static size_t priority(const QueueItem& v) {
             return v.priority_;
         }
 
       private:
         size_t priority_;
     };
 
     PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
 
-    // A subrange over which a physical register is allocated.
-    struct AllocatedRange {
-        LiveInterval* interval;
-        const LiveInterval::Range* range;
-
-        AllocatedRange()
-          : interval(nullptr), range(nullptr)
-        {}
-
-        AllocatedRange(LiveInterval* interval, const LiveInterval::Range* range)
-          : interval(interval), range(range)
-        {}
-
-        static int compare(const AllocatedRange& v0, const AllocatedRange& v1) {
-            // LiveInterval::Range includes 'from' but excludes 'to'.
-            if (v0.range->to <= v1.range->from)
-                return -1;
-            if (v0.range->from >= v1.range->to)
-                return 1;
-            return 0;
-        }
-    };
-
-    typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;
+    typedef SplayTree<LiveRange*, LiveRange> LiveRangeSet;
 
     // Each physical register is associated with the set of ranges over which
     // that register is currently allocated.
     struct PhysicalRegister {
         bool allocatable;
         AnyRegister reg;
-        AllocatedRangeSet allocations;
+        LiveRangeSet allocations;
 
         PhysicalRegister() : allocatable(false) {}
     };
     mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;
 
     // Ranges of code which are considered to be hot, for which good allocation
     // should be prioritized.
-    AllocatedRangeSet hotcode;
-
-    // During register allocation, virtual stack slots are used for spills.
-    // These are converted to actual spill locations
-    size_t numVirtualStackSlots;
+    LiveRangeSet hotcode;
 
     // Information about an allocated stack slot.
     struct SpillSlot : public TempObject, public InlineForwardListNode<SpillSlot> {
         LStackSlot alloc;
-        AllocatedRangeSet allocated;
+        LiveRangeSet allocated;
 
         SpillSlot(uint32_t slot, LifoAlloc* alloc)
           : alloc(slot), allocated(alloc)
         {}
     };
     typedef InlineForwardList<SpillSlot> SpillSlotList;
 
     // All allocated slots of each width.
     SpillSlotList normalSlots, doubleSlots, quadSlots;
 
   public:
     BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
-      : LiveRangeAllocator<BacktrackingVirtualRegister>(mir, lir, graph),
-        numVirtualStackSlots(0)
+      : RegisterAllocator(mir, lir, graph),
+        liveIn(nullptr),
+        callRanges(nullptr)
     { }
 
     bool go();
 
   private:
 
-    typedef Vector<LiveInterval*, 4, SystemAllocPolicy> LiveIntervalVector;
+    typedef Vector<LiveRange*, 4, SystemAllocPolicy> LiveRangeVector;
+    typedef Vector<LiveBundle*, 4, SystemAllocPolicy> LiveBundleVector;
 
+    // Liveness methods.
     bool init();
-    bool canAddToGroup(VirtualRegisterGroup* group, BacktrackingVirtualRegister* reg);
-    bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
-    bool tryGroupReusedRegister(uint32_t def, uint32_t use);
-    bool groupAndQueueRegisters();
-    bool tryAllocateFixed(LiveInterval* interval, bool* success, bool* pfixed,
-                          LiveIntervalVector& conflicting);
-    bool tryAllocateNonFixed(LiveInterval* interval, bool* success, bool* pfixed,
-                             LiveIntervalVector& conflicting);
-    bool processInterval(LiveInterval* interval);
-    bool processGroup(VirtualRegisterGroup* group);
-    bool setIntervalRequirement(LiveInterval* interval);
-    bool tryAllocateRegister(PhysicalRegister& r, LiveInterval* interval,
-                             bool* success, bool* pfixed, LiveIntervalVector& conflicting);
-    bool tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegisterGroup* group,
-                                  bool* psuccess, bool* pfixed, LiveInterval** pconflicting);
-    bool evictInterval(LiveInterval* interval);
-    void distributeUses(LiveInterval* interval, const LiveIntervalVector& newIntervals);
-    bool split(LiveInterval* interval, const LiveIntervalVector& newIntervals);
-    bool requeueIntervals(const LiveIntervalVector& newIntervals);
-    void spill(LiveInterval* interval);
+    bool buildLivenessInfo();
+
+    bool addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to);
+
+    VirtualRegister& vreg(const LDefinition* def) {
+        return vregs[def->virtualRegister()];
+    }
+    VirtualRegister& vreg(const LAllocation* alloc) {
+        MOZ_ASSERT(alloc->isUse());
+        return vregs[alloc->toUse()->virtualRegister()];
+    }
+
+    // Allocation methods.
+    bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
+    bool tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input);
+    bool mergeAndQueueRegisters();
+    bool tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
+                          bool* success, bool* pfixed, LiveBundleVector& conflicting);
+    bool tryAllocateNonFixed(LiveBundle* bundle, Requirement requirement, Requirement hint,
+                             bool* success, bool* pfixed, LiveBundleVector& conflicting);
+    bool processBundle(LiveBundle* bundle);
+    bool computeRequirement(LiveBundle* bundle, Requirement *prequirement, Requirement *phint);
+    bool tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
+                             bool* success, bool* pfixed, LiveBundleVector& conflicting);
+    bool evictBundle(LiveBundle* bundle);
+    bool splitAndRequeueBundles(LiveBundle* bundle, const LiveBundleVector& newBundles);
+    bool spill(LiveBundle* bundle);
 
     bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
     bool isRegisterUse(LUse* use, LNode* ins, bool considerCopy = false);
-    bool isRegisterDefinition(LiveInterval* interval);
-    bool addLiveInterval(LiveIntervalVector& intervals, uint32_t vreg,
-                         LiveInterval* spillInterval,
-                         CodePosition from, CodePosition to);
-    bool pickStackSlot(LiveInterval* interval);
-    bool reuseOrAllocateStackSlot(const LiveIntervalVector& intervals, LDefinition::Type type,
-                                  LAllocation* palloc);
-    bool insertAllRanges(AllocatedRangeSet& set, const LiveIntervalVector& intervals);
+    bool isRegisterDefinition(LiveRange* range);
+    bool pickStackSlot(SpillSet* spill);
+    bool insertAllRanges(LiveRangeSet& set, LiveBundle* bundle);
 
+    // Reification methods.
     bool pickStackSlots();
     bool resolveControlFlow();