Merge inbound to m-c
author: Wes Kocher <wkocher@mozilla.com>
date: Fri, 14 Mar 2014 22:20:25 -0700
changeset 190922 82c90c17fc954db2229e051ddc7072888899aaf8
parent 190837 092d63342910dea2db9332dc6a2b95ba0b0caa8a (current diff)
parent 190921 f19c420744bd384e599a2029e95919f84c07eff4 (diff)
child 190923 2cb90e6d143f0de6b3242dc63286d9ef65e4a452
child 190940 cce83256faa250b8b85e9de68fa82c485b463d0e
child 190945 aa52fe58c9cb61508440d89457f361ca9d898deb
child 190958 ffa3c24401086bfd3bd4c0865d21936d8171a593
push id: 3503
push user: raliiev@mozilla.com
push date: Mon, 28 Apr 2014 18:51:11 +0000
treeherder: mozilla-beta@c95ac01e332e
milestone: 30.0a1
first release with: nightly 30.0a1 / 82c90c17fc95 / 20140315030204 (linux32, linux64, mac, win32, win64)
Merge inbound to m-c
b2g/app/b2g.js
browser/installer/package-manifest.in
browser/modules/webappsUI.jsm
js/src/jsapi-tests/testMappedArrayBuffer.cpp
services/sync/tps/extensions/tps/modules/addons.jsm
services/sync/tps/extensions/tps/modules/bookmarks.jsm
services/sync/tps/extensions/tps/modules/forms.jsm
services/sync/tps/extensions/tps/modules/fxaccounts.jsm
services/sync/tps/extensions/tps/modules/history.jsm
services/sync/tps/extensions/tps/modules/logger.jsm
services/sync/tps/extensions/tps/modules/passwords.jsm
services/sync/tps/extensions/tps/modules/prefs.jsm
services/sync/tps/extensions/tps/modules/quit.js
services/sync/tps/extensions/tps/modules/sync.jsm
services/sync/tps/extensions/tps/modules/tabs.jsm
services/sync/tps/extensions/tps/modules/tps.jsm
services/sync/tps/extensions/tps/modules/windows.jsm
testing/tps/config/README.txt
testing/tps/tps/mozhttpd.py
testing/tps/tps/thread.py
toolkit/webapps/WebappsInstaller.jsm
toolkit/xre/nsWindowsDllInterceptor.h
tools/profiler/IOInterposer.cpp
tools/profiler/IOInterposer.h
tools/profiler/NSPRInterposer.cpp
tools/profiler/NSPRInterposer.h
webapprt/WebappsHandler.jsm
widget/gtk/nsNativeKeyBindings.cpp
widget/gtk/nsNativeKeyBindings.h
widget/nsINativeKeyBindings.h
--- a/accessible/src/xul/XULTreeGridAccessible.cpp
+++ b/accessible/src/xul/XULTreeGridAccessible.cpp
@@ -392,36 +392,33 @@ void
 XULTreeGridRowAccessible::RowInvalidated(int32_t aStartColIdx,
                                          int32_t aEndColIdx)
 {
   nsCOMPtr<nsITreeColumns> treeColumns;
   mTree->GetColumns(getter_AddRefs(treeColumns));
   if (!treeColumns)
     return;
 
+  bool nameChanged = false;
   for (int32_t colIdx = aStartColIdx; colIdx <= aEndColIdx; ++colIdx) {
     nsCOMPtr<nsITreeColumn> column;
     treeColumns->GetColumnAt(colIdx, getter_AddRefs(column));
     if (column && !nsCoreUtils::IsColumnHidden(column)) {
       Accessible* cellAccessible = GetCellAccessible(column);
       if (cellAccessible) {
         nsRefPtr<XULTreeGridCellAccessible> cellAcc = do_QueryObject(cellAccessible);
 
-        cellAcc->CellInvalidated();
+        nameChanged |= cellAcc->CellInvalidated();
       }
     }
   }
 
-  nsAutoString name;
-  Name(name);
+  if (nameChanged)
+    nsEventShell::FireEvent(nsIAccessibleEvent::EVENT_NAME_CHANGE, this);
 
-  if (name != mCachedName) {
-    nsEventShell::FireEvent(nsIAccessibleEvent::EVENT_NAME_CHANGE, this);
-    mCachedName = name;
-  }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 // XULTreeGridRowAccessible: Accessible protected implementation
 
 void
 XULTreeGridRowAccessible::CacheChildren()
 {
@@ -758,43 +755,47 @@ Relation
 XULTreeGridCellAccessible::RelationByType(RelationType aType)
 {
   return Relation();
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 // XULTreeGridCellAccessible: public implementation
 
-void
+bool
 XULTreeGridCellAccessible::CellInvalidated()
 {
 
   nsAutoString textEquiv;
 
   int16_t type;
   mColumn->GetType(&type);
   if (type == nsITreeColumn::TYPE_CHECKBOX) {
     mTreeView->GetCellValue(mRow, mColumn, textEquiv);
     if (mCachedTextEquiv != textEquiv) {
       bool isEnabled = textEquiv.EqualsLiteral("true");
       nsRefPtr<AccEvent> accEvent =
         new AccStateChangeEvent(this, states::CHECKED, isEnabled);
       nsEventShell::FireEvent(accEvent);
 
       mCachedTextEquiv = textEquiv;
+      return true;
     }
 
-    return;
+    return false;
   }
 
   mTreeView->GetCellText(mRow, mColumn, textEquiv);
   if (mCachedTextEquiv != textEquiv) {
     nsEventShell::FireEvent(nsIAccessibleEvent::EVENT_NAME_CHANGE, this);
     mCachedTextEquiv = textEquiv;
+    return true;
   }
+
+  return false;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 // XULTreeGridCellAccessible: Accessible protected implementation
 
 Accessible*
 XULTreeGridCellAccessible::GetSiblingAtOffset(int32_t aOffset,
                                               nsresult* aError) const
--- a/accessible/src/xul/XULTreeGridAccessible.h
+++ b/accessible/src/xul/XULTreeGridAccessible.h
@@ -101,17 +101,16 @@ public:
 
 protected:
 
   // Accessible
   virtual void CacheChildren();
 
   // XULTreeItemAccessibleBase
   mutable AccessibleHashtable mAccessibleCache;
-  nsString mCachedName;
 };
 
 
 /**
  * Represents an accessible for XUL tree cell in the case when XUL tree has
  * multiple columns.
  */
 
@@ -175,18 +174,19 @@ public:
   virtual bool Selected() MOZ_OVERRIDE;
 
   // XULTreeGridCellAccessible
   NS_DECLARE_STATIC_IID_ACCESSOR(XULTREEGRIDCELLACCESSIBLE_IMPL_CID)
 
   /**
    * Fire name or state change event if the accessible text or value has been
    * changed.
+   * @return true if name has changed
    */
-  void CellInvalidated();
+  bool CellInvalidated();
 
 protected:
   // Accessible
   virtual Accessible* GetSiblingAtOffset(int32_t aOffset,
                                          nsresult* aError = nullptr) const;
   virtual void DispatchClickEvent(nsIContent* aContent, uint32_t aActionIndex);
 
   // XULTreeGridCellAccessible
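
Note on the accessible change above: a minimal, simplified sketch of the pattern this patch adopts, with hypothetical helper names (GetCell is illustrative, not the real accessor). Each cell reports whether its text equivalent changed, and the row fires a single name-change event only when some cell did, instead of recomputing and comparing a cached row name.

    // Simplified illustration of the aggregation pattern (names hypothetical).
    bool anyNameChanged = false;
    for (int32_t colIdx = aStartColIdx; colIdx <= aEndColIdx; ++colIdx) {
      // CellInvalidated() now returns true when the cell's text equivalent changed.
      anyNameChanged |= GetCell(colIdx)->CellInvalidated();
    }
    if (anyNameChanged) {
      // One event for the whole row, no cached mCachedName comparison needed.
      nsEventShell::FireEvent(nsIAccessibleEvent::EVENT_NAME_CHANGE, this);
    }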
--- a/b2g/app/b2g.js
+++ b/b2g/app/b2g.js
@@ -874,19 +874,19 @@ pref("osfile.reset_worker_delay", 5000);
 
 // APZC preferences.
 //
 // Gaia relies heavily on scroll events for now, so lets fire them
 // more often than the default value (100).
 pref("apz.asyncscroll.throttle", 40);
 pref("apz.pan_repaint_interval", 16);
 
-// Maximum fling velocity in px/ms.  Slower devices may need to reduce this
+// Maximum fling velocity in inches/ms.  Slower devices may need to reduce this
 // to avoid checkerboarding.  Note, float value must be set as a string.
-pref("apz.max_velocity_pixels_per_ms", "6.0");
+pref("apz.max_velocity_inches_per_ms", "0.0375");
 
 // Tweak default displayport values to reduce the risk of running out of
 // memory when zooming in
 pref("apz.x_skate_size_multiplier", "1.25");
 pref("apz.y_skate_size_multiplier", "1.5");
 pref("apz.x_stationary_size_multiplier", "1.5");
 pref("apz.y_stationary_size_multiplier", "1.8");
 pref("apz.enlarge_displayport_when_clipped", true);
--- a/browser/base/content/browser.js
+++ b/browser/base/content/browser.js
@@ -6,16 +6,18 @@
 let Ci = Components.interfaces;
 let Cu = Components.utils;
 
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
 Cu.import("resource://gre/modules/NotificationDB.jsm");
 Cu.import("resource:///modules/RecentWindow.jsm");
 Cu.import("resource://gre/modules/WindowsPrefSync.jsm");
 
+XPCOMUtils.defineLazyModuleGetter(this, "BrowserUtils",
+                                  "resource://gre/modules/BrowserUtils.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "Task",
                                   "resource://gre/modules/Task.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "CharsetMenu",
                                   "resource://gre/modules/CharsetMenu.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "ShortcutUtils",
                                   "resource://gre/modules/ShortcutUtils.jsm");
 
 const nsIWebNavigation = Ci.nsIWebNavigation;
@@ -4835,23 +4837,21 @@ function UpdateDynamicShortcutTooltipTex
  *
  * @param aCharLen
  *        The maximum number of characters to return.
  */
 function getBrowserSelection(aCharLen) {
   // selections of more than 150 characters aren't useful
   const kMaxSelectionLen = 150;
   const charLen = Math.min(aCharLen || kMaxSelectionLen, kMaxSelectionLen);
-  let commandDispatcher = document.commandDispatcher;
-
-  var focusedWindow = commandDispatcher.focusedWindow;
+
+  let [element, focusedWindow] = BrowserUtils.getFocusSync(document);
   var selection = focusedWindow.getSelection().toString();
   // try getting a selected text in text input.
   if (!selection) {
-    let element = commandDispatcher.focusedElement;
     var isOnTextInput = function isOnTextInput(elem) {
       // we avoid to return a value if a selection is in password field.
       // ref. bug 565717
       return elem instanceof HTMLTextAreaElement ||
              (elem instanceof HTMLInputElement && elem.mozIsTextField(true));
     };
 
     if (isOnTextInput(element)) {
--- a/browser/base/content/content.js
+++ b/browser/base/content/content.js
@@ -3,16 +3,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 let Cc = Components.classes;
 let Ci = Components.interfaces;
 let Cu = Components.utils;
 
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://gre/modules/Services.jsm");
 
 XPCOMUtils.defineLazyModuleGetter(this, "ContentLinkHandler",
   "resource:///modules/ContentLinkHandler.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "LoginManagerContent",
   "resource://gre/modules/LoginManagerContent.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "InsecurePasswordUtils",
   "resource://gre/modules/InsecurePasswordUtils.jsm");
 XPCOMUtils.defineLazyModuleGetter(this, "PrivateBrowsingUtils",
--- a/browser/base/content/nsContextMenu.js
+++ b/browser/base/content/nsContextMenu.js
@@ -75,18 +75,17 @@ nsContextMenu.prototype = {
                           mailtoHandler.preferredAction == Ci.nsIHandlerInfo.useHelperApp &&
                           (mailtoHandler.preferredApplicationHandler instanceof Ci.nsIWebHandlerApp));
     }
 
     // Time to do some bad things and see if we've highlighted a URL that
     // isn't actually linked.
     if (this.isTextSelected && !this.onLink) {
       // Ok, we have some text, let's figure out if it looks like a URL.
-      let selection =  document.commandDispatcher.focusedWindow
-                               .getSelection();
+      let selection =  this.focusedWindow.getSelection();
       let linkText = selection.toString().trim();
       let uri;
       if (/^(?:https?|ftp):/i.test(linkText)) {
         try {
           uri = makeURI(linkText);
         } catch (ex) {}
       }
       // Check if this could be a valid url, just missing the protocol.
@@ -544,16 +543,20 @@ nsContextMenu.prototype = {
     this.onCTPPlugin       = false;
     this.canSpellCheck     = false;
     this.textSelected      = getBrowserSelection();
     this.isTextSelected    = this.textSelected.length != 0;
 
     // Remember the node that was clicked.
     this.target = aNode;
 
+    let [elt, win] = BrowserUtils.getFocusSync(document);
+    this.focusedWindow = win;
+    this.focusedElement = elt;
+
     // If this is a remote context menu event, use the information from
     // gContextMenuContentData instead.
     if (this.isRemote) {
       this.browser = gContextMenuContentData.browser;
     } else {
       this.browser = this.target.ownerDocument.defaultView
                                   .QueryInterface(Ci.nsIInterfaceRequestor)
                                   .getInterface(Ci.nsIWebNavigation)
@@ -1247,17 +1250,17 @@ nsContextMenu.prototype = {
   },
 
   // Save URL of clicked-on link.
   saveLink: function() {
     var doc =  this.target.ownerDocument;
     var linkText;
     // If selected text is found to match valid URL pattern.
     if (this.onPlainTextLink)
-      linkText = document.commandDispatcher.focusedWindow.getSelection().toString().trim();
+      linkText = this.focusedWindow.getSelection().toString().trim();
     else
       linkText = this.linkText();
     urlSecurityCheck(this.linkURL, this._unremotePrincipal(doc.nodePrincipal));
 
     this.saveHelper(this.linkURL, linkText, null, true, doc);
   },
 
   // Backwards-compatibility wrapper
@@ -1444,17 +1447,17 @@ nsContextMenu.prototype = {
       }
     }
 
     return text;
   },
 
   // Returns true if anything is selected.
   isContentSelection: function() {
-    return !document.commandDispatcher.focusedWindow.getSelection().isCollapsed;
+    return !this.focusedWindow.getSelection().isCollapsed;
   },
 
   toString: function () {
     return "contextMenu.target     = " + this.target + "\n" +
            "contextMenu.onImage    = " + this.onImage + "\n" +
            "contextMenu.onLink     = " + this.onLink + "\n" +
            "contextMenu.link       = " + this.link + "\n" +
            "contextMenu.inFrame    = " + this.inFrame + "\n" +
@@ -1546,17 +1549,17 @@ nsContextMenu.prototype = {
   bookmarkThisPage: function CM_bookmarkThisPage() {
     window.top.PlacesCommandHook.bookmarkPage(this.browser, PlacesUtils.bookmarksMenuFolderId, true);
   },
 
   bookmarkLink: function CM_bookmarkLink() {
     var linkText;
     // If selected text is found to match valid URL pattern.
     if (this.onPlainTextLink)
-      linkText = document.commandDispatcher.focusedWindow.getSelection().toString().trim();
+      linkText = this.focusedWindow.getSelection().toString().trim();
     else
       linkText = this.linkText();
     window.top.PlacesCommandHook.bookmarkLink(PlacesUtils.bookmarksMenuFolderId, this.linkURL,
                                               linkText);
   },
 
   addBookmarkForFrame: function CM_addBookmarkForFrame() {
     var doc = this.target.ownerDocument;
--- a/browser/base/content/test/general/browser_save_link-perwindowpb.js
+++ b/browser/base/content/test/general/browser_save_link-perwindowpb.js
@@ -1,148 +1,177 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 var MockFilePicker = SpecialPowers.MockFilePicker;
 MockFilePicker.init(window);
 
-let tempScope = {};
-Cu.import("resource://gre/modules/NetUtil.jsm", tempScope);
-let NetUtil = tempScope.NetUtil;
-
 // Trigger a save of a link in public mode, then trigger an identical save
 // in private mode and ensure that the second request is differentiated from
 // the first by checking that cookies set by the first response are not sent
 // during the second request.
 function triggerSave(aWindow, aCallback) {
+  info("started triggerSave");
   var fileName;
   let testBrowser = aWindow.gBrowser.selectedBrowser;
   // This page sets a cookie if and only if a cookie does not exist yet
   let testURI = "http://mochi.test:8888/browser/browser/base/content/test/general/bug792517-2.html";
   testBrowser.loadURI(testURI);
   testBrowser.addEventListener("pageshow", function pageShown(event) {
+    info("got pageshow with " + event.target.location);
     if (event.target.location != testURI) {
+      info("try again!");
       testBrowser.loadURI(testURI);
       return;
     }
+    info("found our page!");
     testBrowser.removeEventListener("pageshow", pageShown, false);
 
-    executeSoon(function () {
-      aWindow.document.addEventListener("popupshown", function(e) contextMenuOpened(aWindow, e), false);
+    waitForFocus(function () {
+      info("register to handle popupshown");
+      aWindow.document.addEventListener("popupshown", contextMenuOpened, false);
 
       var link = testBrowser.contentDocument.getElementById("fff");
+      info("link: " + link);
       EventUtils.synthesizeMouseAtCenter(link,
                                          { type: "contextmenu", button: 2 },
                                          testBrowser.contentWindow);
-    });
+      info("right clicked!");
+    }, testBrowser.contentWindow);
   }, false);
 
-  function contextMenuOpened(aWindow, event) {
-    event.currentTarget.removeEventListener("popupshown", contextMenuOpened, false);
+  function contextMenuOpened(event) {
+    info("contextMenuOpened");
+    aWindow.document.removeEventListener("popupshown", contextMenuOpened);
 
     // Create the folder the link will be saved into.
     var destDir = createTemporarySaveDirectory();
     var destFile = destDir.clone();
 
     MockFilePicker.displayDirectory = destDir;
     MockFilePicker.showCallback = function(fp) {
+      info("showCallback");
       fileName = fp.defaultString;
+      info("fileName: " + fileName);
       destFile.append (fileName);
       MockFilePicker.returnFiles = [destFile];
       MockFilePicker.filterIndex = 1; // kSaveAsType_URL
+      info("done showCallback");
     };
 
     mockTransferCallback = function(downloadSuccess) {
+      info("mockTransferCallback");
       onTransferComplete(aWindow, downloadSuccess, destDir);
       destDir.remove(true);
       ok(!destDir.exists(), "Destination dir should be removed");
       ok(!destFile.exists(), "Destination file should be removed");
-      mockTransferCallback = function(){};
+      mockTransferCallback = null;
+      info("done mockTransferCallback");
     }
 
     // Select "Save Link As" option from context menu
     var saveLinkCommand = aWindow.document.getElementById("context-savelink");
+    info("saveLinkCommand: " + saveLinkCommand);
     saveLinkCommand.doCommand();
 
     event.target.hidePopup();
+    info("popup hidden");
   }
 
   function onTransferComplete(aWindow, downloadSuccess, destDir) {
     ok(downloadSuccess, "Link should have been downloaded successfully");
-    aWindow.gBrowser.removeCurrentTab();
+    aWindow.close();
 
     executeSoon(function() aCallback());
   }
 }
 
 function test() {
+  info("Start the test");
   waitForExplicitFinish();
 
-  var windowsToClose = [];
   var gNumSet = 0;
   function testOnWindow(options, callback) {
+    info("testOnWindow(" + options + ")");
     var win = OpenBrowserWindow(options);
+    info("got " + win);
     whenDelayedStartupFinished(win, () => callback(win));
   }
 
   function whenDelayedStartupFinished(aWindow, aCallback) {
+    info("whenDelayedStartupFinished");
     Services.obs.addObserver(function observer(aSubject, aTopic) {
+      info("whenDelayedStartupFinished, got topic: " + aTopic + ", got subject: " + aSubject + ", waiting for " + aWindow);
       if (aWindow == aSubject) {
         Services.obs.removeObserver(observer, aTopic);
         executeSoon(aCallback);
+        info("whenDelayedStartupFinished found our window");
       }
     }, "browser-delayed-startup-finished", false);
   }
 
   mockTransferRegisterer.register();
 
   registerCleanupFunction(function () {
+    info("Running the cleanup code");
     mockTransferRegisterer.unregister();
     MockFilePicker.cleanup();
-    windowsToClose.forEach(function(win) {
-      win.close();
-    });
     Services.obs.removeObserver(observer, "http-on-modify-request");
     Services.obs.removeObserver(observer, "http-on-examine-response");
+    info("Finished running the cleanup code");
   });
  
   function observer(subject, topic, state) {
+    info("observer called with " + topic);
     if (topic == "http-on-modify-request") {
       onModifyRequest(subject);
     } else if (topic == "http-on-examine-response") {
       onExamineResponse(subject);
     }
   }
 
   function onExamineResponse(subject) {
     let channel = subject.QueryInterface(Ci.nsIHttpChannel);
+    info("onExamineResponse with " + channel.URI.spec);
     if (channel.URI.spec != "http://mochi.test:8888/browser/browser/base/content/test/general/bug792517.sjs") {
+      info("returning");
       return;
     }
     try {
       let cookies = channel.getResponseHeader("set-cookie");
       // From browser/base/content/test/general/bug792715.sjs, we receive a Set-Cookie
       // header with foopy=1 when there are no cookies for that domain.
       is(cookies, "foopy=1", "Cookie should be foopy=1");
       gNumSet += 1;
-    } catch (ex if ex.result == Cr.NS_ERROR_NOT_AVAILABLE) { }
+      info("gNumSet = " + gNumSet);
+    } catch (ex if ex.result == Cr.NS_ERROR_NOT_AVAILABLE) {
+      info("onExamineResponse caught NOTAVAIL" + ex);
+    } catch (ex) {
+      info("ionExamineResponse caught " + ex);
+    }
   }
 
   function onModifyRequest(subject) {
     let channel = subject.QueryInterface(Ci.nsIHttpChannel);
+    info("onModifyRequest with " + channel.URI.spec);
     if (channel.URI.spec != "http://mochi.test:8888/browser/browser/base/content/test/general/bug792517.sjs") {
       return;
     }
     try {
       let cookies = channel.getRequestHeader("cookie");
+      info("cookies: " + cookies);
       // From browser/base/content/test/general/bug792715.sjs, we should never send a
       // cookie because we are making only 2 requests: one in public mode, and
       // one in private mode.
       throw "We should never send a cookie in this test";
-    } catch (ex if ex.result == Cr.NS_ERROR_NOT_AVAILABLE) { }
+    } catch (ex if ex.result == Cr.NS_ERROR_NOT_AVAILABLE) {
+      info("onModifyRequest caught NOTAVAIL" + ex);
+    } catch (ex) {
+      info("ionModifyRequest caught " + ex);
+    }
   }
 
   Services.obs.addObserver(observer, "http-on-modify-request", false);
   Services.obs.addObserver(observer, "http-on-examine-response", false);
 
   testOnWindow(undefined, function(win) {
     // The first save from a regular window sets a cookie.
     triggerSave(win, function() {
@@ -164,12 +193,15 @@ Cc["@mozilla.org/moz/jssubscript-loader;
   .loadSubScript("chrome://mochitests/content/browser/toolkit/content/tests/browser/common/mockTransfer.js",
                  this);
 
 function createTemporarySaveDirectory() {
   var saveDir = Cc["@mozilla.org/file/directory_service;1"]
                   .getService(Ci.nsIProperties)
                   .get("TmpD", Ci.nsIFile);
   saveDir.append("testsavedir");
-  if (!saveDir.exists())
+  if (!saveDir.exists()) {
+    info("create testsavedir!");
     saveDir.create(Ci.nsIFile.DIRECTORY_TYPE, 0755);
+  }
+  info("return from createTempSaveDir: " + saveDir.path);
   return saveDir;
 }
--- a/browser/metro/base/content/bindings/browser.xml
+++ b/browser/metro/base/content/bindings/browser.xml
@@ -63,20 +63,16 @@
 
       <field name="_documentURI">null</field>
       <property name="documentURI"
                 onget="return this._documentURI ? this._ios.newURI(this._documentURI, null, null) : null"
                 readonly="true"/>
 
       <field name="contentWindowId">null</field>
 
-      <property name="messageManager"
-                onget="return this._frameLoader.messageManager;"
-                readonly="true"/>
-
       <field name="_contentTitle">null</field>
 
       <field name="_ios">
          Components.classes["@mozilla.org/network/io-service;1"].getService(Components.interfaces.nsIIOService);
       </field>
 
       <!--
         * Point Conversion Routines - browsers may be shifted by UI such that
--- a/build/pgo/profileserver.py
+++ b/build/pgo/profileserver.py
@@ -12,16 +12,17 @@ import json
 import socket
 import threading
 import os
 import sys
 import shutil
 import tempfile
 from datetime import datetime
 from mozbuild.base import MozbuildObject
+from buildconfig import substs
 
 PORT = 8888
 
 if __name__ == '__main__':
   cli = CLI()
   debug_args, interactive = cli.debugger_arguments()
 
   build = MozbuildObject.from_environment()
@@ -49,16 +50,24 @@ if __name__ == '__main__':
     profile = FirefoxProfile(profile=profilePath,
                              preferences=prefs,
                              addons=[os.path.join(build.distdir, 'xpi-stage', 'quitter')],
                              locations=locations)
 
     env = os.environ.copy()
     env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
     env["XPCOM_DEBUG_BREAK"] = "warn"
+
+    # For VC12, make sure we can find the right bitness of pgort120.dll
+    if "VS120COMNTOOLS" in env and not substs["HAVE_64BIT_OS"]:
+      vc12dir = os.path.abspath(os.path.join(env["VS120COMNTOOLS"],
+                                             "../../VC/bin"))
+      if os.path.exists(vc12dir):
+        env["PATH"] = vc12dir + ";" + env["PATH"]
+
     jarlog = os.getenv("JARLOG_FILE")
     if jarlog:
       env["MOZ_JAR_LOG_FILE"] = os.path.abspath(jarlog)
       print "jarlog: %s" % env["MOZ_JAR_LOG_FILE"]
 
     cmdargs = ["http://localhost:%d/index.html" % PORT]
     runner = FirefoxRunner(profile=profile,
                            binary=build.get_binary_path(where="staged-package"),
--- a/content/base/src/nsContentUtils.cpp
+++ b/content/base/src/nsContentUtils.cpp
@@ -1511,18 +1511,16 @@ nsContentUtils::Shutdown()
   delete sOSText;
   sOSText = nullptr;
   delete sAltText;  
   sAltText = nullptr;
   delete sModifierSeparator;
   sModifierSeparator = nullptr;
 
   NS_IF_RELEASE(sSameOriginChecker);
-
-  nsTextEditorState::ShutDown();
 }
 
 /**
  * Checks whether two nodes come from the same origin. aTrustedNode is
  * considered 'safe' in that a user can operate on it and that it isn't
  * a js-object that implements nsIDOMNode.
  * Never call this function with the first node provided by script, it
  * must always be known to be a 'real' node!
--- a/content/canvas/src/WebGLContext.cpp
+++ b/content/canvas/src/WebGLContext.cpp
@@ -282,16 +282,17 @@ WebGLContext::DestroyResourcesAndContext
     // disable all extensions except "WEBGL_lose_context". see bug #927969
     // spec: http://www.khronos.org/registry/webgl/specs/latest/1.0/#5.15.2
     for (size_t i = 0; i < size_t(WebGLExtensionID_max); ++i) {
         WebGLExtensionID extension = WebGLExtensionID(i);
 
         if (!IsExtensionEnabled(extension) || (extension == WEBGL_lose_context))
             continue;
 
+        mExtensions[extension]->MarkLost();
         mExtensions[extension] = nullptr;
     }
 
     // We just got rid of everything, so the context had better
     // have been going away.
 #ifdef DEBUG
     if (gl->DebugMode()) {
         printf_stderr("--- WebGL context destroyed: %p\n", gl.get());
--- a/content/canvas/src/WebGLContextGL.cpp
+++ b/content/canvas/src/WebGLContextGL.cpp
@@ -715,16 +715,19 @@ WebGLContext::DeleteRenderbuffer(WebGLRe
         return;
 
     if (!rbuf || rbuf->IsDeleted())
         return;
 
     if (mBoundFramebuffer)
         mBoundFramebuffer->DetachRenderbuffer(rbuf);
 
+    // Invalidate framebuffer status cache
+    rbuf->NotifyFBsStatusChanged();
+
     if (mBoundRenderbuffer == rbuf)
         BindRenderbuffer(LOCAL_GL_RENDERBUFFER,
                          static_cast<WebGLRenderbuffer*>(nullptr));
 
     rbuf->RequestDelete();
 }
 
 void
@@ -737,16 +740,19 @@ WebGLContext::DeleteTexture(WebGLTexture
         return;
 
     if (!tex || tex->IsDeleted())
         return;
 
     if (mBoundFramebuffer)
         mBoundFramebuffer->DetachTexture(tex);
 
+    // Invalidate framebuffer status cache
+    tex->NotifyFBsStatusChanged();
+
     GLuint activeTexture = mActiveTexture;
     for (int32_t i = 0; i < mGLMaxTextureUnits; i++) {
         if ((tex->Target() == LOCAL_GL_TEXTURE_2D && mBound2DTextures[i] == tex) ||
             (tex->Target() == LOCAL_GL_TEXTURE_CUBE_MAP && mBoundCubeMapTextures[i] == tex))
         {
             ActiveTexture(LOCAL_GL_TEXTURE0 + i);
             BindTexture(tex->Target(), static_cast<WebGLTexture*>(nullptr));
         }
@@ -2657,16 +2663,18 @@ WebGLContext::RenderbufferStorage(GLenum
     }
 
     MakeContextCurrent();
 
     bool sizeChanges = width != mBoundRenderbuffer->Width() ||
                        height != mBoundRenderbuffer->Height() ||
                        internalformat != mBoundRenderbuffer->InternalFormat();
     if (sizeChanges) {
+        // Invalidate framebuffer status cache
+        mBoundRenderbuffer->NotifyFBsStatusChanged();
         GetAndFlushUnderlyingGLErrors();
         mBoundRenderbuffer->RenderbufferStorage(internalformatForGL, width, height);
         GLenum error = GetAndFlushUnderlyingGLErrors();
         if (error) {
             GenerateWarning("renderbufferStorage generated error %s", ErrorName(error));
             return;
         }
     } else {
--- a/content/canvas/src/WebGLContextValidate.cpp
+++ b/content/canvas/src/WebGLContextValidate.cpp
@@ -1168,33 +1168,36 @@ WebGLContext::ValidateTexImageFormatAndT
     bool validCombo = false;
 
     switch (format) {
     case LOCAL_GL_ALPHA:
     case LOCAL_GL_LUMINANCE:
     case LOCAL_GL_LUMINANCE_ALPHA:
         validCombo = (type == LOCAL_GL_UNSIGNED_BYTE ||
                       type == LOCAL_GL_HALF_FLOAT ||
+                      type == LOCAL_GL_HALF_FLOAT_OES ||
                       type == LOCAL_GL_FLOAT);
         break;
 
     case LOCAL_GL_RGB:
     case LOCAL_GL_SRGB:
         validCombo = (type == LOCAL_GL_UNSIGNED_BYTE ||
                       type == LOCAL_GL_UNSIGNED_SHORT_5_6_5 ||
                       type == LOCAL_GL_HALF_FLOAT ||
+                      type == LOCAL_GL_HALF_FLOAT_OES ||
                       type == LOCAL_GL_FLOAT);
         break;
 
     case LOCAL_GL_RGBA:
     case LOCAL_GL_SRGB_ALPHA:
         validCombo = (type == LOCAL_GL_UNSIGNED_BYTE ||
                       type == LOCAL_GL_UNSIGNED_SHORT_4_4_4_4 ||
                       type == LOCAL_GL_UNSIGNED_SHORT_5_5_5_1 ||
                       type == LOCAL_GL_HALF_FLOAT ||
+                      type == LOCAL_GL_HALF_FLOAT_OES ||
                       type == LOCAL_GL_FLOAT);
         break;
 
     case LOCAL_GL_DEPTH_COMPONENT:
         validCombo = (type == LOCAL_GL_UNSIGNED_SHORT ||
                       type == LOCAL_GL_UNSIGNED_INT);
         break;
 
@@ -1247,18 +1250,21 @@ WebGLContext::ValidateTexInputData(GLenu
 
     // First, we check for packed types
     switch (type) {
     case LOCAL_GL_UNSIGNED_BYTE:
         validInput = (jsArrayType == -1 || jsArrayType == js::ArrayBufferView::TYPE_UINT8);
         break;
 
         // TODO: WebGL spec doesn't allow half floats to specified as UInt16.
-    // case LOCAL_GL_HALF_FLOAT:
-    // case LOCAL_GL_HALF_FLOAT_OES:
+    case LOCAL_GL_HALF_FLOAT:
+    case LOCAL_GL_HALF_FLOAT_OES:
+        validInput = (jsArrayType == -1);
+        break;
+
     case LOCAL_GL_UNSIGNED_SHORT:
     case LOCAL_GL_UNSIGNED_SHORT_4_4_4_4:
     case LOCAL_GL_UNSIGNED_SHORT_5_5_5_1:
     case LOCAL_GL_UNSIGNED_SHORT_5_6_5:
         validInput = (jsArrayType == -1 || jsArrayType == js::ArrayBufferView::TYPE_UINT16);
         break;
 
     case LOCAL_GL_UNSIGNED_INT:
--- a/content/canvas/src/WebGLExtensionBase.cpp
+++ b/content/canvas/src/WebGLExtensionBase.cpp
@@ -5,20 +5,27 @@
 
 #include "WebGLContext.h"
 #include "WebGLExtensions.h"
 
 using namespace mozilla;
 
 WebGLExtensionBase::WebGLExtensionBase(WebGLContext* context)
     : WebGLContextBoundObject(context)
+    , mIsLost(false)
 {
     SetIsDOMBinding();
 }
 
 WebGLExtensionBase::~WebGLExtensionBase()
 {
 }
 
+void
+WebGLExtensionBase::MarkLost()
+{
+    mIsLost = true;
+}
+
 NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_0(WebGLExtensionBase)
 
 NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(WebGLExtensionBase, AddRef)
 NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(WebGLExtensionBase, Release)
--- a/content/canvas/src/WebGLExtensionDebugShaders.cpp
+++ b/content/canvas/src/WebGLExtensionDebugShaders.cpp
@@ -21,16 +21,21 @@ WebGLExtensionDebugShaders::~WebGLExtens
 /* If no source has been defined, compileShader() has not been called,
  * or the translation has failed for shader, an empty string is
  * returned; otherwise, return the translated source.
  */
 void
 WebGLExtensionDebugShaders::GetTranslatedShaderSource(WebGLShader* shader,
                                                       nsAString& retval)
 {
+    if (mIsLost) {
+        return mContext->ErrorInvalidOperation("getTranslatedShaderSource: "
+                                               "Extension is lost.");
+    }
+
     mContext->GetShaderTranslatedSource(shader, retval);
 
     if (retval.IsVoid()) {
         CopyASCIItoUTF16("", retval);
     }
 }
 
 IMPL_WEBGL_EXTENSION_GOOP(WebGLExtensionDebugShaders)
--- a/content/canvas/src/WebGLExtensionDrawBuffers.cpp
+++ b/content/canvas/src/WebGLExtensionDrawBuffers.cpp
@@ -1,8 +1,9 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "WebGLContext.h"
 #include "WebGLExtensions.h"
 #include "mozilla/dom/WebGLRenderingContextBinding.h"
 #include "WebGLTexture.h"
@@ -44,16 +45,19 @@ WebGLExtensionDrawBuffers::WebGLExtensio
 }
 
 WebGLExtensionDrawBuffers::~WebGLExtensionDrawBuffers()
 {
 }
 
 void WebGLExtensionDrawBuffers::DrawBuffersWEBGL(const dom::Sequence<GLenum>& buffers)
 {
+    if (mIsLost)
+        return mContext->ErrorInvalidOperation("drawBuffersWEBGL: Extension is lost.");
+
     mContext->DrawBuffers(buffers);
 }
 
 bool WebGLExtensionDrawBuffers::IsSupported(const WebGLContext* context)
 {
     gl::GLContext * gl = context->GL();
 
     if (!gl->IsSupported(GLFeature::draw_buffers)) {
--- a/content/canvas/src/WebGLExtensionInstancedArrays.cpp
+++ b/content/canvas/src/WebGLExtensionInstancedArrays.cpp
@@ -20,30 +20,39 @@ WebGLExtensionInstancedArrays::WebGLExte
 WebGLExtensionInstancedArrays::~WebGLExtensionInstancedArrays()
 {
 }
 
 void
 WebGLExtensionInstancedArrays::DrawArraysInstancedANGLE(GLenum mode, GLint first,
                                                         GLsizei count, GLsizei primcount)
 {
+    if (mIsLost)
+        return mContext->ErrorInvalidOperation("drawArraysInstancedANGLE: Extension is lost.");
+
     mContext->DrawArraysInstanced(mode, first, count, primcount);
 }
 
 void
 WebGLExtensionInstancedArrays::DrawElementsInstancedANGLE(GLenum mode, GLsizei count,
                                                           GLenum type, WebGLintptr offset,
                                                           GLsizei primcount)
 {
+    if (mIsLost)
+        return mContext->ErrorInvalidOperation("drawElementsInstancedANGLE: Extension is lost.");
+
     mContext->DrawElementsInstanced(mode, count, type, offset, primcount);
 }
 
 void
 WebGLExtensionInstancedArrays::VertexAttribDivisorANGLE(GLuint index, GLuint divisor)
 {
+    if (mIsLost)
+        return mContext->ErrorInvalidOperation("vertexAttribDivisorANGLE: Extension is lost.");
+
     mContext->VertexAttribDivisor(index, divisor);
 }
 
 bool
 WebGLExtensionInstancedArrays::IsSupported(const WebGLContext* context)
 {
     gl::GLContext* gl = context->GL();
 
--- a/content/canvas/src/WebGLExtensionVertexArray.cpp
+++ b/content/canvas/src/WebGLExtensionVertexArray.cpp
@@ -20,31 +20,47 @@ WebGLExtensionVertexArray::WebGLExtensio
 }
 
 WebGLExtensionVertexArray::~WebGLExtensionVertexArray()
 {
 }
 
 already_AddRefed<WebGLVertexArray> WebGLExtensionVertexArray::CreateVertexArrayOES()
 {
+    if (mIsLost) {
+        mContext->ErrorInvalidOperation("createVertexArrayOES: Extension is lost. Returning NULL.");
+        return nullptr;
+    }
+
     return mContext->CreateVertexArray();
 }
 
 void WebGLExtensionVertexArray::DeleteVertexArrayOES(WebGLVertexArray* array)
 {
+    if (mIsLost)
+        return mContext->ErrorInvalidOperation("deleteVertexArrayOES: Extension is lost.");
+
     mContext->DeleteVertexArray(array);
 }
 
 bool WebGLExtensionVertexArray::IsVertexArrayOES(WebGLVertexArray* array)
 {
+    if (mIsLost) {
+        mContext->ErrorInvalidOperation("isVertexArrayOES: Extension is lost. Returning false.");
+        return false;
+    }
+
     return mContext->IsVertexArray(array);
 }
 
 void WebGLExtensionVertexArray::BindVertexArrayOES(WebGLVertexArray* array)
 {
+    if (mIsLost)
+        return mContext->ErrorInvalidOperation("bindVertexArrayOES: Extension is lost.");
+
     mContext->BindVertexArray(array);
 }
 
 bool WebGLExtensionVertexArray::IsSupported(const WebGLContext* context)
 {
     gl::GLContext* gl = context->GL();
 
     return gl->IsSupported(gl::GLFeature::vertex_array_object);
--- a/content/canvas/src/WebGLExtensions.h
+++ b/content/canvas/src/WebGLExtensions.h
@@ -25,18 +25,23 @@ class WebGLExtensionBase
 public:
     WebGLExtensionBase(WebGLContext*);
     virtual ~WebGLExtensionBase();
 
     WebGLContext *GetParentObject() const {
         return Context();
     }
 
+    void MarkLost();
+
     NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(WebGLExtensionBase)
     NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(WebGLExtensionBase)
+
+protected:
+    bool mIsLost;
 };
 
 #define DECL_WEBGL_EXTENSION_GOOP                                           \
     virtual JSObject* WrapObject(JSContext *cx,                             \
                                  JS::Handle<JSObject*> scope) MOZ_OVERRIDE;
 
 #define IMPL_WEBGL_EXTENSION_GOOP(WebGLExtensionType) \
     JSObject* \
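
The extension-loss handling in this push follows a single pattern: WebGLContext::DestroyResourcesAndContext calls MarkLost() on every enabled extension (except WEBGL_lose_context), and each extension entry point bails out with an INVALID_OPERATION error while mIsLost is set. A minimal sketch of that guard, using hypothetical class and method names rather than any one real extension:

    // Sketch of the mIsLost guard applied in every extension entry point.
    // "WebGLExtensionExample" and "DoSomething" are illustrative names only.
    void WebGLExtensionExample::DoSomething()
    {
        if (mIsLost)
            return mContext->ErrorInvalidOperation("doSomething: Extension is lost.");

        mContext->DoSomethingImpl(); // forward to the context, as the real extensions do
    }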
--- a/content/canvas/src/WebGLFramebuffer.cpp
+++ b/content/canvas/src/WebGLFramebuffer.cpp
@@ -20,16 +20,17 @@ using namespace mozilla::gl;
 JSObject*
 WebGLFramebuffer::WrapObject(JSContext* cx, JS::Handle<JSObject*> scope)
 {
     return dom::WebGLFramebufferBinding::Wrap(cx, scope, this);
 }
 
 WebGLFramebuffer::WebGLFramebuffer(WebGLContext* context)
     : WebGLContextBoundObject(context)
+    , mStatus(0)
     , mHasEverBeenBound(false)
     , mDepthAttachment(LOCAL_GL_DEPTH_ATTACHMENT)
     , mStencilAttachment(LOCAL_GL_STENCIL_ATTACHMENT)
     , mDepthStencilAttachment(LOCAL_GL_DEPTH_STENCIL_ATTACHMENT)
 {
     SetIsDOMBinding();
     mContext->MakeContextCurrent();
     mContext->gl->fGenFramebuffers(1, &mGLName);
@@ -386,61 +387,65 @@ WebGLFramebuffer::Delete()
 
 void
 WebGLFramebuffer::FramebufferRenderbuffer(GLenum target,
                                           GLenum attachment,
                                           GLenum rbtarget,
                                           WebGLRenderbuffer* wrb)
 {
     MOZ_ASSERT(mContext->mBoundFramebuffer == this);
+
     if (!mContext->ValidateObjectAllowNull("framebufferRenderbuffer: renderbuffer", wrb))
         return;
 
     if (target != LOCAL_GL_FRAMEBUFFER)
         return mContext->ErrorInvalidEnumInfo("framebufferRenderbuffer: target", target);
 
     if (rbtarget != LOCAL_GL_RENDERBUFFER)
         return mContext->ErrorInvalidEnumInfo("framebufferRenderbuffer: renderbuffer target:", rbtarget);
 
-    switch (attachment) {
-    case LOCAL_GL_DEPTH_ATTACHMENT:
-        mDepthAttachment.SetRenderbuffer(wrb);
-        break;
-    case LOCAL_GL_STENCIL_ATTACHMENT:
-        mStencilAttachment.SetRenderbuffer(wrb);
-        break;
-    case LOCAL_GL_DEPTH_STENCIL_ATTACHMENT:
-        mDepthStencilAttachment.SetRenderbuffer(wrb);
-        break;
-    default:
-        // finish checking that the 'attachment' parameter is among the allowed values
-        if (!CheckColorAttachmentNumber(attachment, "framebufferRenderbuffer")){
-            return;
-        }
+    /* Get the requested attachment. If result is NULL, attachment is
+     * invalid and an error is generated.
+     *
+     * Don't use GetAttachment(...) here because in opt builds it
+     * returns mColorAttachment[0] for invalid attachment, which we
+     * really don't want to mess with.
+     */
+    Attachment* a = GetAttachmentOrNull(attachment);
+    if (!a)
+        return; // Error generated internally to GetAttachmentOrNull.
 
-        size_t colorAttachmentId = size_t(attachment - LOCAL_GL_COLOR_ATTACHMENT0);
-        EnsureColorAttachments(colorAttachmentId);
-        mColorAttachments[colorAttachmentId].SetRenderbuffer(wrb);
-        break;
-    }
+    /* Invalidate the cached framebuffer status and inform the renderbuffer
+     * of its new attachment.
+     */
+    mStatus = 0;
+    // Detach current
+    if (a->Texture())
+        a->Texture()->DetachFrom(this, attachment);
+    else if (a->Renderbuffer())
+        a->Renderbuffer()->DetachFrom(this, attachment);
+
+    // Attach new
+    if (wrb)
+        wrb->AttachTo(this, attachment);
+
+    a->SetRenderbuffer(wrb);
 }
 
 void
 WebGLFramebuffer::FramebufferTexture2D(GLenum target,
                                        GLenum attachment,
                                        GLenum textarget,
                                        WebGLTexture* wtex,
                                        GLint level)
 {
     MOZ_ASSERT(mContext->mBoundFramebuffer == this);
-    if (!mContext->ValidateObjectAllowNull("framebufferTexture2D: texture",
-                                           wtex))
-    {
+
+    if (!mContext->ValidateObjectAllowNull("framebufferTexture2D: texture", wtex))
         return;
-    }
 
     if (target != LOCAL_GL_FRAMEBUFFER)
         return mContext->ErrorInvalidEnumInfo("framebufferTexture2D: target", target);
 
     if (textarget != LOCAL_GL_TEXTURE_2D &&
         (textarget < LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X ||
          textarget > LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z))
     {
@@ -453,35 +458,63 @@ WebGLFramebuffer::FramebufferTexture2D(G
         if (isTexture2D != isTexTarget2D) {
             return mContext->ErrorInvalidOperation("framebufferTexture2D: mismatched texture and texture target");
         }
     }
 
     if (level != 0)
         return mContext->ErrorInvalidValue("framebufferTexture2D: level must be 0");
 
-    switch (attachment) {
-    case LOCAL_GL_DEPTH_ATTACHMENT:
-        mDepthAttachment.SetTexImage(wtex, textarget, level);
-        break;
-    case LOCAL_GL_STENCIL_ATTACHMENT:
-        mStencilAttachment.SetTexImage(wtex, textarget, level);
-        break;
-    case LOCAL_GL_DEPTH_STENCIL_ATTACHMENT:
-        mDepthStencilAttachment.SetTexImage(wtex, textarget, level);
-        break;
-    default:
-        if (!CheckColorAttachmentNumber(attachment, "framebufferTexture2D"))
-            return;
+    /* Get the requested attachment. If result is NULL, attachment is
+     * invalid and an error is generated.
+     *
+     * Don't use GetAttachment(...) here because in opt builds it
+     * returns mColorAttachment[0] for invalid attachment, which we
+     * really don't want to mess with.
+     */
+    Attachment* a = GetAttachmentOrNull(attachment);
+    if (!a)
+        return; // Error generated internally to GetAttachmentOrNull.
+
+    /* Invalidate the cached framebuffer status and inform the texture
+     * of its new attachment.
+     */
+    mStatus = 0;
+    // Detach current
+    if (a->Texture())
+        a->Texture()->DetachFrom(this, attachment);
+    else if (a->Renderbuffer())
+        a->Renderbuffer()->DetachFrom(this, attachment);
 
-        size_t colorAttachmentId = size_t(attachment - LOCAL_GL_COLOR_ATTACHMENT0);
-        EnsureColorAttachments(colorAttachmentId);
-        mColorAttachments[colorAttachmentId].SetTexImage(wtex, textarget, level);
-        break;
-    }
+    // Attach new
+    if (wtex)
+        wtex->AttachTo(this, attachment);
+
+    a->SetTexImage(wtex, textarget, level);
+}
+
+WebGLFramebuffer::Attachment*
+WebGLFramebuffer::GetAttachmentOrNull(GLenum attachment)
+{
+    if (attachment == LOCAL_GL_DEPTH_STENCIL_ATTACHMENT)
+        return &mDepthStencilAttachment;
+
+    if (attachment == LOCAL_GL_DEPTH_ATTACHMENT)
+        return &mDepthAttachment;
+
+    if (attachment == LOCAL_GL_STENCIL_ATTACHMENT)
+        return &mStencilAttachment;
+
+    if (!CheckColorAttachmentNumber(attachment, "getAttachmentOrNull"))
+        return nullptr;
+
+    size_t colorAttachmentId = attachment - LOCAL_GL_COLOR_ATTACHMENT0;
+    EnsureColorAttachments(colorAttachmentId);
+
+    return &mColorAttachments[colorAttachmentId];
 }
 
 const WebGLFramebuffer::Attachment&
 WebGLFramebuffer::GetAttachment(GLenum attachment) const
 {
     if (attachment == LOCAL_GL_DEPTH_STENCIL_ATTACHMENT)
         return mDepthStencilAttachment;
     if (attachment == LOCAL_GL_DEPTH_ATTACHMENT)
@@ -671,27 +704,31 @@ WebGLFramebuffer::PrecheckFramebufferSta
         return LOCAL_GL_FRAMEBUFFER_UNSUPPORTED;
 
     return LOCAL_GL_FRAMEBUFFER_COMPLETE;
 }
 
 GLenum
 WebGLFramebuffer::CheckFramebufferStatus() const
 {
-    GLenum precheckStatus = PrecheckFramebufferStatus();
-    if (precheckStatus != LOCAL_GL_FRAMEBUFFER_COMPLETE)
-        return precheckStatus;
+    if (mStatus != 0)
+        return mStatus;
+
+    mStatus = PrecheckFramebufferStatus();
+    if (mStatus != LOCAL_GL_FRAMEBUFFER_COMPLETE)
+        return mStatus;
 
     // Looks good on our end. Let's ask the driver.
     mContext->MakeContextCurrent();
 
     // Ok, attach our chosen flavor of {DEPTH, STENCIL, DEPTH_STENCIL}.
     FinalizeAttachments();
 
-    return mContext->gl->fCheckFramebufferStatus(LOCAL_GL_FRAMEBUFFER);
+    mStatus = mContext->gl->fCheckFramebufferStatus(LOCAL_GL_FRAMEBUFFER);
+    return mStatus;
 }
 
 bool
 WebGLFramebuffer::HasCompletePlanes(GLbitfield mask)
 {
     if (CheckFramebufferStatus() != LOCAL_GL_FRAMEBUFFER_COMPLETE)
         return false;
 
@@ -826,16 +863,23 @@ void WebGLFramebuffer::EnsureColorAttach
 
     mColorAttachments.SetLength(colorAttachmentId + 1);
 
     for (size_t i = colorAttachmentId; i >= currentAttachmentCount; i--) {
         mColorAttachments[i].mAttachmentPoint = LOCAL_GL_COLOR_ATTACHMENT0 + i;
     }
 }
 
+void
+WebGLFramebuffer::NotifyAttachableChanged() const
+{
+    // Attachment has changed, so invalidate cached status
+    mStatus = 0;
+}
+
 static void
 FinalizeDrawAndReadBuffers(GLContext* aGL, bool aColorBufferDefined)
 {
     MOZ_ASSERT(aGL, "Expected a valid GLContext ptr.");
     // GLES don't support DrawBuffer()/ReadBuffer.
     // According to http://www.opengl.org/wiki/Framebuffer_Object
     //
     // Each draw buffers must either specify color attachment points that have images
--- a/content/canvas/src/WebGLFramebuffer.h
+++ b/content/canvas/src/WebGLFramebuffer.h
@@ -9,16 +9,17 @@
 #include "WebGLObjectModel.h"
 
 #include "nsWrapperCache.h"
 
 #include "mozilla/LinkedList.h"
 
 namespace mozilla {
 
+class WebGLFramebufferAttachable;
 class WebGLTexture;
 class WebGLRenderbuffer;
 namespace gl {
     class GLContext;
 }
 
 class WebGLFramebuffer MOZ_FINAL
     : public nsWrapperCache
@@ -109,16 +110,17 @@ public:
     void FramebufferTexture2D(GLenum target,
                               GLenum attachment,
                               GLenum textarget,
                               WebGLTexture* wtex,
                               GLint level);
 
 private:
     const WebGLRectangleObject& GetAnyRectObject() const;
+    Attachment* GetAttachmentOrNull(GLenum attachment);
 
 public:
     bool HasDefinedAttachments() const;
     bool HasIncompleteAttachments() const;
     bool AllImageRectsMatch() const;
     GLenum PrecheckFramebufferStatus() const;
     GLenum CheckFramebufferStatus() const;
 
@@ -169,21 +171,27 @@ public:
 
     // mask mirrors glClear.
     bool HasCompletePlanes(GLbitfield mask);
 
     bool CheckAndInitializeAttachments();
 
     bool CheckColorAttachmentNumber(GLenum attachment, const char* functionName) const;
 
+    void EnsureColorAttachments(size_t colorAttachmentId);
+
+    Attachment* AttachmentFor(GLenum attachment);
+    void NotifyAttachableChanged() const;
+
+private:
+    mutable GLenum mStatus;
+
     GLuint mGLName;
     bool mHasEverBeenBound;
 
-    void EnsureColorAttachments(size_t colorAttachmentId);
-
     // we only store pointers to attached renderbuffers, not to attached textures, because
     // we will only need to initialize renderbuffers. Textures are already initialized.
     nsTArray<Attachment> mColorAttachments;
     Attachment mDepthAttachment,
                mStencilAttachment,
                mDepthStencilAttachment;
 };
 
new file mode 100644
--- /dev/null
+++ b/content/canvas/src/WebGLFramebufferAttachable.cpp
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGLContext.h"
+#include "WebGLFramebufferAttachable.h"
+#include "WebGLFramebuffer.h"
+#include "WebGLRenderbuffer.h"
+#include "WebGLTexture.h"
+
+using namespace mozilla;
+
+WebGLFramebufferAttachable::AttachmentPoint*
+WebGLFramebufferAttachable::Contains(const WebGLFramebuffer* fb, GLenum attachment)
+{
+    AttachmentPoint* first = mAttachmentPoints.begin();
+    AttachmentPoint* last = mAttachmentPoints.end();
+
+    for (; first != last; ++first) {
+        if (first->mFB == fb && first->mAttachment == attachment)
+            return first;
+    }
+
+    return nullptr;
+}
+
+void
+WebGLFramebufferAttachable::AttachTo(WebGLFramebuffer* fb, GLenum attachment)
+{
+    MOZ_ASSERT(fb);
+    if (!fb)
+        return;
+
+    if (Contains(fb, attachment))
+        return; // Already attached. Ignore.
+
+    mAttachmentPoints.append(AttachmentPoint(fb, attachment));
+}
+
+void
+WebGLFramebufferAttachable::DetachFrom(WebGLFramebuffer* fb, GLenum attachment)
+{
+    MOZ_ASSERT(fb);
+    if (!fb)
+        return;
+
+    AttachmentPoint* point = Contains(fb, attachment);
+    if (!point) {
+        MOZ_ASSERT(false, "Is not attached to FB");
+        return;
+    }
+
+    mAttachmentPoints.erase(point);
+}
+
+void
+WebGLFramebufferAttachable::NotifyFBsStatusChanged()
+{
+    AttachmentPoint* first = mAttachmentPoints.begin();
+    AttachmentPoint* last = mAttachmentPoints.end();
+    for ( ; first != last; ++first)
+        first->mFB->NotifyAttachableChanged();
+}
new file mode 100644
--- /dev/null
+++ b/content/canvas/src/WebGLFramebufferAttachable.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGLFRAMEBUFFERATTACHABLE_H_
+#define WEBGLFRAMEBUFFERATTACHABLE_H_
+
+#include "GLDefs.h"
+#include "mozilla/Vector.h"
+
+namespace mozilla {
+
+class WebGLFramebuffer;
+
+class WebGLFramebufferAttachable
+{
+    struct AttachmentPoint
+    {
+        AttachmentPoint(const WebGLFramebuffer* fb, GLenum attachment)
+            : mFB(fb)
+            , mAttachment(attachment)
+        {}
+
+        const WebGLFramebuffer* mFB;
+        GLenum mAttachment;
+    };
+
+    Vector<AttachmentPoint> mAttachmentPoints;
+
+    AttachmentPoint* Contains(const WebGLFramebuffer* fb, GLenum attachment);
+
+public:
+
+    // Track FBO/Attachment combinations
+    void AttachTo(WebGLFramebuffer* fb, GLenum attachment);
+    void DetachFrom(WebGLFramebuffer* fb, GLenum attachment);
+    void NotifyFBsStatusChanged();
+};
+
+} // namespace mozilla
+
+#endif // !WEBGLFRAMEBUFFERATTACHABLE_H_
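
Taken together, the new WebGLFramebufferAttachable base class and the mutable mStatus member on WebGLFramebuffer form a completeness-status cache: textures and renderbuffers remember which framebuffer attachment points reference them, and any change that can affect completeness clears the cached status so the next checkFramebufferStatus call re-queries the driver. A condensed sketch of the flow, pieced together from the hunks above (not verbatim, bodies abbreviated):

    // Condensed illustration of the status-cache invalidation flow.
    void WebGLTexture::SetImageInfo(/* ... */)
    {
        // ... update level/size/format bookkeeping ...
        NotifyFBsStatusChanged();              // tell every framebuffer this texture is attached to
    }

    void WebGLFramebufferAttachable::NotifyFBsStatusChanged()
    {
        for (AttachmentPoint* p = mAttachmentPoints.begin(); p != mAttachmentPoints.end(); ++p)
            p->mFB->NotifyAttachableChanged(); // each framebuffer drops its cached status
    }

    void WebGLFramebuffer::NotifyAttachableChanged() const
    {
        mStatus = 0;                           // 0 means "unknown, ask the driver again"
    }

    GLenum WebGLFramebuffer::CheckFramebufferStatus() const
    {
        if (mStatus != 0)
            return mStatus;                    // cache hit: skip the driver round trip
        mStatus = PrecheckFramebufferStatus();
        if (mStatus != LOCAL_GL_FRAMEBUFFER_COMPLETE)
            return mStatus;
        FinalizeAttachments();
        mStatus = mContext->gl->fCheckFramebufferStatus(LOCAL_GL_FRAMEBUFFER);
        return mStatus;
    }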
--- a/content/canvas/src/WebGLObjectModel.h
+++ b/content/canvas/src/WebGLObjectModel.h
@@ -300,17 +300,17 @@ public:
             mHeight = rect->Height();
         } else {
             mWidth = 0;
             mHeight = 0;
         }
     }
 
     bool HasSameDimensionsAs(const WebGLRectangleObject& other) const {
-        return Width() == other.Width() && Height() == other.Height(); 
+        return Width() == other.Width() && Height() == other.Height();
     }
 
 protected:
     GLsizei mWidth;
     GLsizei mHeight;
 };
 
 }// namespace mozilla
--- a/content/canvas/src/WebGLRenderbuffer.h
+++ b/content/canvas/src/WebGLRenderbuffer.h
@@ -2,29 +2,31 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef WEBGLRENDERBUFFER_H_
 #define WEBGLRENDERBUFFER_H_
 
 #include "WebGLObjectModel.h"
+#include "WebGLFramebufferAttachable.h"
 
 #include "nsWrapperCache.h"
 
 #include "mozilla/LinkedList.h"
 
 namespace mozilla {
 
 class WebGLRenderbuffer MOZ_FINAL
     : public nsWrapperCache
     , public WebGLRefCountedObject<WebGLRenderbuffer>
     , public LinkedListElement<WebGLRenderbuffer>
     , public WebGLRectangleObject
     , public WebGLContextBoundObject
+    , public WebGLFramebufferAttachable
 {
 public:
     WebGLRenderbuffer(WebGLContext *context);
 
     ~WebGLRenderbuffer() {
         DeleteOnce();
     }
 
--- a/content/canvas/src/WebGLTexture.cpp
+++ b/content/canvas/src/WebGLTexture.cpp
@@ -142,16 +142,19 @@ WebGLTexture::SetImageInfo(GLenum aTarge
 
     EnsureMaxLevelWithCustomImagesAtLeast(aLevel);
 
     ImageInfoAt(aTarget, aLevel) = ImageInfo(aWidth, aHeight, aFormat, aType, aStatus);
 
     if (aLevel > 0)
         SetCustomMipmap();
 
+    // Invalidate framebuffer status cache
+    NotifyFBsStatusChanged();
+
     SetFakeBlackStatus(WebGLTextureFakeBlackStatus::Unknown);
 }
 
 void
 WebGLTexture::SetGeneratedMipmap() {
     if (!mHaveGeneratedMipmap) {
         mHaveGeneratedMipmap = true;
         SetFakeBlackStatus(WebGLTextureFakeBlackStatus::Unknown);
--- a/content/canvas/src/WebGLTexture.h
+++ b/content/canvas/src/WebGLTexture.h
@@ -2,16 +2,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef WEBGLTEXTURE_H_
 #define WEBGLTEXTURE_H_
 
 #include "WebGLObjectModel.h"
+#include "WebGLFramebufferAttachable.h"
 
 #include "nsWrapperCache.h"
 
 #include "mozilla/LinkedList.h"
 #include <algorithm>
 
 namespace mozilla {
 
@@ -32,16 +33,17 @@ inline bool FormatHasAlpha(GLenum format
 
 // NOTE: When this class is switched to new DOM bindings, update the (then-slow)
 // WrapObject calls in GetParameter and GetFramebufferAttachmentParameter.
 class WebGLTexture MOZ_FINAL
     : public nsWrapperCache
     , public WebGLRefCountedObject<WebGLTexture>
     , public LinkedListElement<WebGLTexture>
     , public WebGLContextBoundObject
+    , public WebGLFramebufferAttachable
 {
 public:
     WebGLTexture(WebGLContext *context);
 
     ~WebGLTexture() {
         DeleteOnce();
     }
 
--- a/content/canvas/src/moz.build
+++ b/content/canvas/src/moz.build
@@ -62,16 +62,17 @@ if CONFIG['MOZ_WEBGL']:
         'WebGLExtensionStandardDerivatives.cpp',
         'WebGLExtensionTextureFilterAnisotropic.cpp',
         'WebGLExtensionTextureFloat.cpp',
         'WebGLExtensionTextureFloatLinear.cpp',
         'WebGLExtensionTextureHalfFloat.cpp',
         'WebGLExtensionTextureHalfFloatLinear.cpp',
         'WebGLExtensionVertexArray.cpp',
         'WebGLFramebuffer.cpp',
+        'WebGLFramebufferAttachable.cpp',
         'WebGLObjectModel.cpp',
         'WebGLProgram.cpp',
         'WebGLQuery.cpp',
         'WebGLRenderbuffer.cpp',
         'WebGLShader.cpp',
         'WebGLShaderPrecisionFormat.cpp',
         'WebGLTexelConversions.cpp',
         'WebGLTexture.cpp',
@@ -99,9 +100,8 @@ LOCAL_INCLUDES += [
     '/content/xul/content/src',
     '/dom/base',
     '/image/src',
     '/js/xpconnect/src',
     '/layout/generic',
     '/layout/style',
     '/layout/xul',
 ]
-
--- a/content/canvas/test/reftest/reftest.list
+++ b/content/canvas/test/reftest/reftest.list
@@ -143,22 +143,25 @@ fuzzy-if(B2G,256,83) random-if(Android&&
 random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=0.0&alphaVal=1.0  black.html
 
 fuzzy-if(B2G,256,83) random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=1.0&alphaVal=0.0        wrapper.html?colors.png
 random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=1.0&alphaVal=0.0&alpha  wrapper.html?white.png
 
 fuzzy(1,65536) fuzzy-if(B2G,256,83) fuzzy-if(Android||B2G,9,65536) random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=0.5&alphaVal=1.0  wrapper.html?half-colors.png
 
 # Test premult:
-fuzzy(1,65536) fails-if(B2G) fuzzy-if(Android,9,65536)                                random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=1.0&alphaVal=0.5&alpha          wrapper.html?colors-half-alpha.png
+# random-if(B2G) from bug 983650
+fuzzy(1,65536) random-if(B2G) fuzzy-if(Android,9,65536)                                random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=1.0&alphaVal=0.5&alpha          wrapper.html?colors-half-alpha.png
 fuzzy(1,65536) fails-if(B2G) fuzzy-if(Android,9,65536) fails-if(cocoaWidget||Android) random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=0.5&alphaVal=0.5&alpha          wrapper.html?half-colors-half-alpha.png
-fuzzy(1,65536) fails-if(B2G) fuzzy-if(Android,9,65536)                                random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=0.5&alphaVal=0.5&alpha&premult  wrapper.html?colors-half-alpha.png
+# random-if(B2G) from bug 983650
+fuzzy(1,65536) random-if(B2G) fuzzy-if(Android,9,65536)                                random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=0.5&alphaVal=0.5&alpha&premult  wrapper.html?colors-half-alpha.png
 
 # Test over-bright premult:
-fuzzy(1,65536) fails-if(B2G) fuzzy-if(Android,9,65536) random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=1.0&alphaVal=0.5&alpha&premult  wrapper.html?colors-half-alpha.png
+# random-if(B2G) from bug 983650
+fuzzy(1,65536) random-if(B2G) fuzzy-if(Android,9,65536) random-if(Android&&AndroidVersion<15)  == webgl-color-alpha-test.html?colorVal=1.0&alphaVal=0.5&alpha&premult  wrapper.html?colors-half-alpha.png
 
 
 # Check for hanging framebuffer bindings:
                                        random-if(Android&&AndroidVersion<15)  == webgl-hanging-fb-test.html?__&________  wrapper.html?green.png
                                        random-if(Android&&AndroidVersion<15)  == webgl-hanging-fb-test.html?aa&________  wrapper.html?green.png
 pref(webgl.force-layers-readback,true) random-if(Android&&AndroidVersion<15)  == webgl-hanging-fb-test.html?__&readback  wrapper.html?green.png
 pref(webgl.force-layers-readback,true) random-if(Android&&AndroidVersion<15)  == webgl-hanging-fb-test.html?aa&readback  wrapper.html?green.png
 
--- a/content/html/content/src/nsTextEditorState.cpp
+++ b/content/html/content/src/nsTextEditorState.cpp
@@ -23,17 +23,17 @@
 #include "nsIDOMHTMLTextAreaElement.h"
 #include "nsITransactionManager.h"
 #include "nsIControllerContext.h"
 #include "nsAttrValue.h"
 #include "nsAttrValueInlines.h"
 #include "nsGenericHTMLElement.h"
 #include "nsIDOMEventListener.h"
 #include "nsIEditorObserver.h"
-#include "nsINativeKeyBindings.h"
+#include "nsIWidget.h"
 #include "nsIDocumentEncoder.h"
 #include "nsISelectionPrivate.h"
 #include "nsPIDOMWindow.h"
 #include "nsServiceManagerUtils.h"
 #include "nsIEditor.h"
 #include "nsTextEditRules.h"
 #include "mozilla/Selection.h"
 #include "nsEventListenerManager.h"
@@ -44,19 +44,16 @@
 #include "mozilla/TextEvents.h"
 #include "mozilla/dom/ScriptSettings.h"
 
 using namespace mozilla;
 using namespace mozilla::dom;
 
 static NS_DEFINE_CID(kTextEditorCID, NS_TEXTEDITOR_CID);
 
-static nsINativeKeyBindings *sNativeInputBindings = nullptr;
-static nsINativeKeyBindings *sNativeTextAreaBindings = nullptr;
-
 class MOZ_STACK_CLASS ValueSetter
 {
 public:
   ValueSetter(nsIEditor* aEditor)
     : mEditor(aEditor)
   {
     MOZ_ASSERT(aEditor);
   
@@ -674,18 +671,16 @@ public:
   NS_DECL_NSIDOMEVENTLISTENER
 
   NS_DECL_NSIEDITOROBSERVER
 
 protected:
 
   nsresult  UpdateTextInputCommands(const nsAString& commandsToUpdate);
 
-  NS_HIDDEN_(nsINativeKeyBindings*) GetKeyBindings();
-
 protected:
 
   nsIFrame* mFrame;
 
   nsITextControlElement* const mTxtCtrlElement;
 
   bool            mSelectionWasCollapsed;
   /**
@@ -793,17 +788,17 @@ nsTextInputListener::NotifySelectionChan
     return NS_OK;
 
   return UpdateTextInputCommands(NS_LITERAL_STRING("select"));
 }
 
 // END nsIDOMSelectionListener
 
 static void
-DoCommandCallback(const char *aCommand, void *aData)
+DoCommandCallback(Command aCommand, void* aData)
 {
   nsTextControlFrame *frame = static_cast<nsTextControlFrame*>(aData);
   nsIContent *content = frame->GetContent();
 
   nsCOMPtr<nsIControllers> controllers;
   nsCOMPtr<nsIDOMHTMLInputElement> input = do_QueryInterface(content);
   if (input) {
     input->GetControllers(getter_AddRefs(controllers));
@@ -816,27 +811,29 @@ DoCommandCallback(const char *aCommand, 
     }
   }
 
   if (!controllers) {
     NS_WARNING("Could not get controllers");
     return;
   }
 
+  const char* commandStr = WidgetKeyboardEvent::GetCommandStr(aCommand);
+
   nsCOMPtr<nsIController> controller;
-  controllers->GetControllerForCommand(aCommand, getter_AddRefs(controller));
+  controllers->GetControllerForCommand(commandStr, getter_AddRefs(controller));
   if (!controller) {
     return;
   }
 
   bool commandEnabled;
-  nsresult rv = controller->IsCommandEnabled(aCommand, &commandEnabled);
+  nsresult rv = controller->IsCommandEnabled(commandStr, &commandEnabled);
   NS_ENSURE_SUCCESS_VOID(rv);
   if (commandEnabled) {
-    controller->DoCommand(aCommand);
+    controller->DoCommand(commandStr);
   }
 }
 
 NS_IMETHODIMP
 nsTextInputListener::HandleEvent(nsIDOMEvent* aEvent)
 {
   bool defaultPrevented = false;
   nsresult rv = aEvent->GetDefaultPrevented(&defaultPrevented);
@@ -853,37 +850,35 @@ nsTextInputListener::HandleEvent(nsIDOME
   }
 
   WidgetKeyboardEvent* keyEvent =
     aEvent->GetInternalNSEvent()->AsKeyboardEvent();
   if (!keyEvent) {
     return NS_ERROR_UNEXPECTED;
   }
 
-  nsINativeKeyBindings *bindings = GetKeyBindings();
-  if (bindings) {
-    bool handled = false;
-    switch (keyEvent->message) {
-      case NS_KEY_DOWN:
-        handled = bindings->KeyDown(*keyEvent, DoCommandCallback, mFrame);
-        break;
-      case NS_KEY_UP:
-        handled = bindings->KeyUp(*keyEvent, DoCommandCallback, mFrame);
-        break;
-      case NS_KEY_PRESS:
-        handled = bindings->KeyPress(*keyEvent, DoCommandCallback, mFrame);
-        break;
-      default:
-        MOZ_CRASH("Unknown key message");
-    }
-    if (handled) {
-      aEvent->PreventDefault();
-    }
+  if (keyEvent->message != NS_KEY_PRESS) {
+    return NS_OK;
   }
 
+  nsIWidget::NativeKeyBindingsType nativeKeyBindingsType =
+    mTxtCtrlElement->IsTextArea() ?
+      nsIWidget::NativeKeyBindingsForMultiLineEditor :
+      nsIWidget::NativeKeyBindingsForSingleLineEditor;
+  nsIWidget* widget = keyEvent->widget;
+  // If the event is created by chrome script, the widget is nullptr.
+  if (!widget) {
+    widget = mFrame->GetNearestWidget();
+    NS_ENSURE_TRUE(widget, NS_OK);
+  }
+
+  if (widget->ExecuteNativeKeyBinding(nativeKeyBindingsType,
+                                      *keyEvent, DoCommandCallback, mFrame)) {
+    aEvent->PreventDefault();
+  }
   return NS_OK;
 }
 
 // BEGIN nsIEditorObserver
 
 NS_IMETHODIMP
 nsTextInputListener::EditAction()
 {
@@ -942,47 +937,16 @@ nsTextInputListener::UpdateTextInputComm
   NS_ENSURE_TRUE(doc, NS_ERROR_FAILURE);
 
   nsPIDOMWindow *domWindow = doc->GetWindow();
   NS_ENSURE_TRUE(domWindow, NS_ERROR_FAILURE);
 
   return domWindow->UpdateCommands(commandsToUpdate);
 }
 
-nsINativeKeyBindings*
-nsTextInputListener::GetKeyBindings()
-{
-  if (mTxtCtrlElement->IsTextArea()) {
-    static bool sNoTextAreaBindings = false;
-
-    if (!sNativeTextAreaBindings && !sNoTextAreaBindings) {
-      CallGetService(NS_NATIVEKEYBINDINGS_CONTRACTID_PREFIX "textarea",
-                     &sNativeTextAreaBindings);
-
-      if (!sNativeTextAreaBindings) {
-        sNoTextAreaBindings = true;
-      }
-    }
-
-    return sNativeTextAreaBindings;
-  }
-
-  static bool sNoInputBindings = false;
-  if (!sNativeInputBindings && !sNoInputBindings) {
-    CallGetService(NS_NATIVEKEYBINDINGS_CONTRACTID_PREFIX "input",
-                   &sNativeInputBindings);
-
-    if (!sNativeInputBindings) {
-      sNoInputBindings = true;
-    }
-  }
-
-  return sNativeInputBindings;
-}
-
 // END nsTextInputListener
 
 // nsTextEditorState
 
 nsTextEditorState::nsTextEditorState(nsITextControlElement* aOwningElement)
   : mTextCtrlElement(aOwningElement),
     mRestoringSelection(nullptr),
     mBoundFrame(nullptr),
@@ -1977,23 +1941,16 @@ nsTextEditorState::InitializeKeyboardEve
     manager->AddEventListenerByType(mTextListener,
                                     NS_LITERAL_STRING("keyup"),
                                     dom::TrustedEventsAtSystemGroupBubble());
   }
 
   mSelCon->SetScrollableFrame(do_QueryFrame(mBoundFrame->GetFirstPrincipalChild()));
 }
 
-/* static */ void
-nsTextEditorState::ShutDown()
-{
-  NS_IF_RELEASE(sNativeTextAreaBindings);
-  NS_IF_RELEASE(sNativeInputBindings);
-}
-
 void
 nsTextEditorState::ValueWasChanged(bool aNotify)
 {
   // placeholder management
   if (!mPlaceholderDiv) {
     return;
   }
 
--- a/content/html/content/src/nsTextEditorState.h
+++ b/content/html/content/src/nsTextEditorState.h
@@ -183,19 +183,16 @@ public:
 
   /**
    * Get the maxlength attribute
    * @param aMaxLength the value of the max length attr
    * @returns false if attr not defined
    */
   bool GetMaxLength(int32_t* aMaxLength);
 
-  /* called to free up native keybinding services */
-  static NS_HIDDEN_(void) ShutDown();
-
   void ClearValueCache() { mCachedValue.Truncate(); }
 
   void HideSelectionIfBlurred();
 
   struct SelectionProperties {
     SelectionProperties() : mStart(0), mEnd(0),
       mDirection(nsITextControlFrame::eForward) {}
     bool IsDefault() const {
--- a/dom/bindings/BindingDeclarations.h
+++ b/dom/bindings/BindingDeclarations.h
@@ -42,16 +42,22 @@ protected:
 };
 
 // Struct that serves as a base class for all typed arrays and array buffers and
 // array buffer views.  Particularly useful so we can use IsBaseOf to detect
 // typed array/buffer/view template arguments.
 struct AllTypedArraysBase {
 };
 
+// Struct that serves as a base class for all owning unions.
+// Particularly useful so we can use IsBaseOf to detect owning union
+// template arguments.
+struct AllOwningUnionBase {
+};
+
 
 struct EnumEntry {
   const char* value;
   size_t length;
 };
 
 class MOZ_STACK_CLASS GlobalObject
 {
--- a/dom/bindings/BindingUtils.h
+++ b/dom/bindings/BindingUtils.h
@@ -1652,109 +1652,124 @@ public:
 };
 
 } // namespace binding_detail
 
 // Class used to trace sequences, with specializations for various
 // sequence types.
 template<typename T,
          bool isDictionary=IsBaseOf<DictionaryBase, T>::value,
-         bool isTypedArray=IsBaseOf<AllTypedArraysBase, T>::value>
+         bool isTypedArray=IsBaseOf<AllTypedArraysBase, T>::value,
+         bool isOwningUnion=IsBaseOf<AllOwningUnionBase, T>::value>
 class SequenceTracer
 {
   explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
 };
 
 // sequence<object> or sequence<object?>
 template<>
-class SequenceTracer<JSObject*, false, false>
+class SequenceTracer<JSObject*, false, false, false>
 {
   explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
 
 public:
   static void TraceSequence(JSTracer* trc, JSObject** objp, JSObject** end) {
     for (; objp != end; ++objp) {
       JS_CallObjectTracer(trc, objp, "sequence<object>");
     }
   }
 };
 
 // sequence<any>
 template<>
-class SequenceTracer<JS::Value, false, false>
+class SequenceTracer<JS::Value, false, false, false>
 {
   explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
 
 public:
   static void TraceSequence(JSTracer* trc, JS::Value* valp, JS::Value* end) {
     for (; valp != end; ++valp) {
       JS_CallValueTracer(trc, valp, "sequence<any>");
     }
   }
 };
 
 // sequence<sequence<T>>
 template<typename T>
-class SequenceTracer<Sequence<T>, false, false>
+class SequenceTracer<Sequence<T>, false, false, false>
 {
   explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
 
 public:
   static void TraceSequence(JSTracer* trc, Sequence<T>* seqp, Sequence<T>* end) {
     for (; seqp != end; ++seqp) {
       DoTraceSequence(trc, *seqp);
     }
   }
 };
 
 // sequence<sequence<T>> as return value
 template<typename T>
-class SequenceTracer<nsTArray<T>, false, false>
+class SequenceTracer<nsTArray<T>, false, false, false>
 {
   explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
 
 public:
   static void TraceSequence(JSTracer* trc, nsTArray<T>* seqp, nsTArray<T>* end) {
     for (; seqp != end; ++seqp) {
       DoTraceSequence(trc, *seqp);
     }
   }
 };
 
 // sequence<someDictionary>
 template<typename T>
-class SequenceTracer<T, true, false>
+class SequenceTracer<T, true, false, false>
 {
   explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
 
 public:
   static void TraceSequence(JSTracer* trc, T* dictp, T* end) {
     for (; dictp != end; ++dictp) {
       dictp->TraceDictionary(trc);
     }
   }
 };
 
 // sequence<SomeTypedArray>
 template<typename T>
-class SequenceTracer<T, false, true>
+class SequenceTracer<T, false, true, false>
 {
   explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
 
 public:
   static void TraceSequence(JSTracer* trc, T* arrayp, T* end) {
     for (; arrayp != end; ++arrayp) {
       arrayp->TraceSelf(trc);
     }
   }
 };
 
+// sequence<SomeOwningUnion>
+template<typename T>
+class SequenceTracer<T, false, false, true>
+{
+  explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
+
+public:
+  static void TraceSequence(JSTracer* trc, T* arrayp, T* end) {
+    for (; arrayp != end; ++arrayp) {
+      arrayp->TraceUnion(trc);
+    }
+  }
+};
+
 // sequence<T?> with T? being a Nullable<T>
 template<typename T>
-class SequenceTracer<Nullable<T>, false, false>
+class SequenceTracer<Nullable<T>, false, false, false>
 {
   explicit SequenceTracer() MOZ_DELETE; // Should never be instantiated
 
 public:
   static void TraceSequence(JSTracer* trc, Nullable<T>* seqp,
                             Nullable<T>* end) {
     for (; seqp != end; ++seqp) {
       if (!seqp->IsNull()) {
--- a/dom/bindings/Codegen.py
+++ b/dom/bindings/Codegen.py
@@ -907,30 +907,41 @@ def UnionTypes(descriptors, dictionaries
                         headers.add("jsfriendapi.h")
                         headers.add("mozilla/dom/TypedArray.h")
                     else:
                         for p in providers:
                             try:
                                 typeDesc = p.getDescriptor(f.inner.identifier.name)
                             except NoSuchDescriptorError:
                                 continue
-                            declarations.add((typeDesc.nativeType, False))
-                            implheaders.add(typeDesc.headerFile)
+                            if typeDesc.interface.isCallback():
+                                # Callback interfaces always use strong refs, so
+                                # we need to include the right header to be able
+                                # to Release() in our inlined code.
+                                headers.add(typeDesc.headerFile)
+                            else:
+                                declarations.add((typeDesc.nativeType, False))
+                                implheaders.add(typeDesc.headerFile)
                 elif f.isDictionary():
                     # For a dictionary, we need to see its declaration in
                     # UnionTypes.h so we have its sizeof and know how big to
                     # make our union.
                     headers.add(CGHeaders.getDeclarationFilename(f.inner))
                     # And if it needs rooting, we need RootedDictionary too
                     if typeNeedsRooting(f):
                         headers.add("mozilla/dom/RootedDictionary.h")
                 elif f.isEnum():
                     # Need to see the actual definition of the enum,
                     # unfortunately.
                     headers.add(CGHeaders.getDeclarationFilename(f.inner))
+                elif f.isCallback():
+                    # Callbacks always use strong refs, so we need to include
+                    # the right header to be able to Release() in our inlined
+                    # code.
+                    headers.add(CGHeaders.getDeclarationFilename(f))
 
     map(addInfoForType, getAllTypes(descriptors, dictionaries, callbacks))
 
     return (headers, implheaders, declarations,
             CGList(itertools.chain(SortedDictValues(unionStructs),
                                    SortedDictValues(owningUnionStructs)), "\n"))
 
 def UnionConversions(descriptors, dictionaries, callbacks, config):
@@ -3271,17 +3282,20 @@ while (true) {
             setDictionary = CGGeneric("done = (failed = !%s.TrySetTo%s(cx, ${val}, ${mutableVal}, tryNext)) || !tryNext;" % (unionArgumentObj, name))
             names.append(name)
         else:
             setDictionary = None
 
         objectMemberTypes = filter(lambda t: t.isObject(), memberTypes)
         if len(objectMemberTypes) > 0:
             assert len(objectMemberTypes) == 1
-            object = CGGeneric("%s.SetToObject(cx, argObj);\n"
+            # Very important to NOT construct a temporary Rooted here, since the
+            # SetToObject call can call a Rooted constructor and we need to keep
+            # stack discipline for Rooted.
+            object = CGGeneric("%s.SetToObject(cx, &${val}.toObject());\n"
                                "done = true;" % unionArgumentObj)
             names.append(objectMemberTypes[0].name)
         else:
             object = None
 
         hasObjectTypes = interfaceObject or arrayObject or dateObject or callbackObject or object
         if hasObjectTypes:
             # "object" is not distinguishable from other types
@@ -3298,17 +3312,17 @@ while (true) {
             if interfaceObject:
                 assert not object
                 if templateBody:
                     templateBody = CGIfWrapper(templateBody, "!done")
                 templateBody = CGList([interfaceObject, templateBody], "\n")
             else:
                 templateBody = CGList([templateBody, object], "\n")
 
-            if any([arrayObject, dateObject, callbackObject, object]):
+            if dateObject:
                 templateBody.prepend(CGGeneric("JS::Rooted<JSObject*> argObj(cx, &${val}.toObject());"))
             templateBody = CGIfWrapper(templateBody, "${val}.isObject()")
         else:
             templateBody = CGGeneric()
 
         if setDictionary:
             assert not object
             templateBody = CGList([templateBody,
@@ -7182,17 +7196,19 @@ class CGUnionStruct(CGThing):
                         body=CGSwitch("aOther.mType", assignmentCases).define()))
                 disallowCopyConstruction = False
             else:
                 disallowCopyConstruction = True
         else:
             disallowCopyConstruction = True
 
         friend="  friend class %sArgument;\n" % str(self.type) if not self.ownsMembers else ""
+        bases = [ClassBase("AllOwningUnionBase")] if self.ownsMembers else []
         return CGClass(selfName,
+                       bases=bases,
                        members=members,
                        constructors=constructors,
                        methods=methods,
                        disallowCopyConstruction=disallowCopyConstruction,
                        extradeclarations=friend,
                        destructor=ClassDestructor(visibility="public",
                                                   body=dtor,
                                                   bodyInHeader=not self.ownsMembers),
--- a/dom/bindings/test/TestBindingHeader.h
+++ b/dom/bindings/test/TestBindingHeader.h
@@ -519,16 +519,17 @@ public:
   void PassUnion7(JSContext*, const ObjectOrStringOrLong& arg);
   void PassUnion8(JSContext*, const ObjectOrStringOrBoolean& arg);
   void PassUnion9(JSContext*, const ObjectOrStringOrLongOrBoolean& arg);
   void PassUnion10(const EventInitOrLong& arg);
   void PassUnion11(JSContext*, const CustomEventInitOrLong& arg);
   void PassUnion12(const EventInitOrLong& arg);
   void PassUnion13(JSContext*, const ObjectOrLongOrNull& arg);
   void PassUnion14(JSContext*, const ObjectOrLongOrNull& arg);
+  void PassUnionWithCallback(const EventHandlerNonNullOrNullOrLong& arg);
 #endif
   void PassNullableUnion(JSContext*, const Nullable<ObjectOrLong>&);
   void PassOptionalUnion(JSContext*, const Optional<ObjectOrLong>&);
   void PassOptionalNullableUnion(JSContext*, const Optional<Nullable<ObjectOrLong> >&);
   void PassOptionalNullableUnionWithDefaultValue(JSContext*, const Nullable<ObjectOrLong>&);
   //void PassUnionWithInterfaces(const TestInterfaceOrTestExternalInterface& arg);
   //void PassUnionWithInterfacesAndNullable(const TestInterfaceOrNullOrTestExternalInterface& arg);
   void PassUnionWithArrayBuffer(const ArrayBufferOrLong&);
@@ -561,16 +562,17 @@ public:
   void PassNullableUnionWithDefaultValue7(const Nullable<UnrestrictedDoubleOrString>& arg);
   void PassNullableUnionWithDefaultValue8(const Nullable<UnrestrictedDoubleOrString>& arg);
   void PassNullableUnionWithDefaultValue9(const Nullable<UnrestrictedDoubleOrString>& arg);
   void PassNullableUnionWithDefaultValue10(const Nullable<UnrestrictedFloatOrString>& arg);
   void PassNullableUnionWithDefaultValue11(const Nullable<UnrestrictedFloatOrString>& arg);
   void PassNullableUnionWithDefaultValue12(const Nullable<UnrestrictedFloatOrString>& arg);
 
   void PassSequenceOfUnions(const Sequence<OwningCanvasPatternOrCanvasGradient>&);
+  void PassSequenceOfUnions2(JSContext*, const Sequence<OwningObjectOrLong>&);
   void PassVariadicUnion(const Sequence<OwningCanvasPatternOrCanvasGradient>&);
 
   void PassSequenceOfNullableUnions(const Sequence<Nullable<OwningCanvasPatternOrCanvasGradient>>&);
   void PassVariadicNullableUnion(const Sequence<Nullable<OwningCanvasPatternOrCanvasGradient>>&);
 
   void ReceiveUnion(OwningCanvasPatternOrCanvasGradient&);
   void ReceiveUnion2(JSContext*, OwningObjectOrLong&);
   void ReceiveUnionContainingNull(OwningCanvasPatternOrNullOrCanvasGradient&);
--- a/dom/bindings/test/TestCodeGen.webidl
+++ b/dom/bindings/test/TestCodeGen.webidl
@@ -470,16 +470,17 @@ interface TestInterface {
   void passUnion7((object or DOMString or long) arg);
   void passUnion8((object or DOMString or boolean) arg);
   void passUnion9((object or DOMString or long or boolean) arg);
   void passUnion10(optional (EventInit or long) arg);
   void passUnion11(optional (CustomEventInit or long) arg);
   void passUnion12(optional (EventInit or long) arg = 5);
   void passUnion13(optional (object or long?) arg = null);
   void passUnion14(optional (object or long?) arg = 5);
+  void passUnionWithCallback((EventHandler or long) arg);
 #endif
   void passUnionWithNullable((object? or long) arg);
   void passNullableUnion((object or long)? arg);
   void passOptionalUnion(optional (object or long) arg);
   void passOptionalNullableUnion(optional (object or long)? arg);
   void passOptionalNullableUnionWithDefaultValue(optional (object or long)? arg = null);
   //void passUnionWithInterfaces((TestInterface or TestExternalInterface) arg);
   //void passUnionWithInterfacesAndNullable((TestInterface? or TestExternalInterface) arg);
@@ -516,16 +517,17 @@ interface TestInterface {
   void passNullableUnionWithDefaultValue7(optional (unrestricted double or DOMString)? arg = "");
   void passNullableUnionWithDefaultValue8(optional (unrestricted double or DOMString)? arg = 1);
   void passNullableUnionWithDefaultValue9(optional (unrestricted double or DOMString)? arg = null);
   void passNullableUnionWithDefaultValue10(optional (unrestricted float or DOMString)? arg = "");
   void passNullableUnionWithDefaultValue11(optional (unrestricted float or DOMString)? arg = 1);
   void passNullableUnionWithDefaultValue12(optional (unrestricted float or DOMString)? arg = null);
 
   void passSequenceOfUnions(sequence<(CanvasPattern or CanvasGradient)> arg);
+  void passSequenceOfUnions2(sequence<(object or long)> arg);
   void passVariadicUnion((CanvasPattern or CanvasGradient)... arg);
 
   void passSequenceOfNullableUnions(sequence<(CanvasPattern or CanvasGradient)?> arg);
   void passVariadicNullableUnion((CanvasPattern or CanvasGradient)?... arg);
 
   (CanvasPattern or CanvasGradient) receiveUnion();
   (object or long) receiveUnion2();
   (CanvasPattern? or CanvasGradient) receiveUnionContainingNull();
--- a/dom/bindings/test/TestExampleGen.webidl
+++ b/dom/bindings/test/TestExampleGen.webidl
@@ -363,16 +363,17 @@ interface TestExampleInterface {
   void passUnion7((object or DOMString or long) arg);
   void passUnion8((object or DOMString or boolean) arg);
   void passUnion9((object or DOMString or long or boolean) arg);
   void passUnion10(optional (EventInit or long) arg);
   void passUnion11(optional (CustomEventInit or long) arg);
   void passUnion12(optional (EventInit or long) arg = 5);
   void passUnion13(optional (object or long?) arg = null);
   void passUnion14(optional (object or long?) arg = 5);
+  void passUnionWithCallback((EventHandler or long) arg);
 #endif
   void passUnionWithNullable((object? or long) arg);
   void passNullableUnion((object or long)? arg);
   void passOptionalUnion(optional (object or long) arg);
   void passOptionalNullableUnion(optional (object or long)? arg);
   void passOptionalNullableUnionWithDefaultValue(optional (object or long)? arg = null);
   //void passUnionWithInterfaces((TestInterface or TestExternalInterface) arg);
   //void passUnionWithInterfacesAndNullable((TestInterface? or TestExternalInterface) arg);
@@ -409,16 +410,17 @@ interface TestExampleInterface {
   void passNullableUnionWithDefaultValue7(optional (unrestricted double or DOMString)? arg = "");
   void passNullableUnionWithDefaultValue8(optional (unrestricted double or DOMString)? arg = 1);
   void passNullableUnionWithDefaultValue9(optional (unrestricted double or DOMString)? arg = null);
   void passNullableUnionWithDefaultValue10(optional (unrestricted float or DOMString)? arg = "");
   void passNullableUnionWithDefaultValue11(optional (unrestricted float or DOMString)? arg = 1);
   void passNullableUnionWithDefaultValue12(optional (unrestricted float or DOMString)? arg = null);
 
   void passSequenceOfUnions(sequence<(CanvasPattern or CanvasGradient)> arg);
+  void passSequenceOfUnions2(sequence<(object or long)> arg);
   void passVariadicUnion((CanvasPattern or CanvasGradient)... arg);
 
   void passSequenceOfNullableUnions(sequence<(CanvasPattern or CanvasGradient)?> arg);
   void passVariadicNullableUnion((CanvasPattern or CanvasGradient)?... arg);
 
   //(CanvasPattern or CanvasGradient) receiveUnion();
   //(object or long) receiveUnion2();
   //(CanvasPattern? or CanvasGradient) receiveUnionContainingNull();
--- a/dom/bindings/test/TestJSImplGen.webidl
+++ b/dom/bindings/test/TestJSImplGen.webidl
@@ -384,16 +384,17 @@ interface TestJSImplInterface {
   void passUnion7((object or DOMString or long) arg);
   void passUnion8((object or DOMString or boolean) arg);
   void passUnion9((object or DOMString or long or boolean) arg);
   void passUnion10(optional (EventInit or long) arg);
   void passUnion11(optional (CustomEventInit or long) arg);
   void passUnion12(optional (EventInit or long) arg = 5);
   void passUnion13(optional (object or long?) arg = null);
   void passUnion14(optional (object or long?) arg = 5);
+  void passUnionWithCallback((EventHandler or long) arg);
 #endif
   void passUnionWithNullable((object? or long) arg);
   void passNullableUnion((object or long)? arg);
   void passOptionalUnion(optional (object or long) arg);
   void passOptionalNullableUnion(optional (object or long)? arg);
   void passOptionalNullableUnionWithDefaultValue(optional (object or long)? arg = null);
   //void passUnionWithInterfaces((TestJSImplInterface or TestExternalInterface) arg);
   //void passUnionWithInterfacesAndNullable((TestJSImplInterface? or TestExternalInterface) arg);
@@ -430,16 +431,17 @@ interface TestJSImplInterface {
   void passNullableUnionWithDefaultValue7(optional (unrestricted double or DOMString)? arg = "");
   void passNullableUnionWithDefaultValue8(optional (unrestricted double or DOMString)? arg = 1);
   void passNullableUnionWithDefaultValue9(optional (unrestricted double or DOMString)? arg = null);
   void passNullableUnionWithDefaultValue10(optional (unrestricted float or DOMString)? arg = "");
   void passNullableUnionWithDefaultValue11(optional (unrestricted float or DOMString)? arg = 1);
   void passNullableUnionWithDefaultValue12(optional (unrestricted float or DOMString)? arg = null);
 
   void passSequenceOfUnions(sequence<(CanvasPattern or CanvasGradient)> arg);
+  void passSequenceOfUnions2(sequence<(object or long)> arg);
   void passVariadicUnion((CanvasPattern or CanvasGradient)... arg);
 
   void passSequenceOfNullableUnions(sequence<(CanvasPattern or CanvasGradient)?> arg);
   void passVariadicNullableUnion((CanvasPattern or CanvasGradient)?... arg);
 
   //(CanvasPattern or CanvasGradient) receiveUnion();
   //(object or long) receiveUnion2();
   //(CanvasPattern? or CanvasGradient) receiveUnionContainingNull();
--- a/dom/identity/DOMIdentity.jsm
+++ b/dom/identity/DOMIdentity.jsm
@@ -330,16 +330,17 @@ this.DOMIdentity = {
 
   _watch: function DOMIdentity__watch(message, targetMM) {
     log("DOMIdentity__watch: " + message.id);
     let context = this.newContext(message, targetMM);
     this.getService(message).RP.watch(context);
   },
 
   _unwatch: function DOMIdentity_unwatch(message, targetMM) {
+    log("DOMIDentity__unwatch: " + message.id);
     this.getService(message).RP.unwatch(message.id, targetMM);
   },
 
   _request: function DOMIdentity__request(message) {
     this.getService(message).RP.request(message.id, message);
   },
 
   _logout: function DOMIdentity__logout(message) {
--- a/dom/identity/nsDOMIdentity.js
+++ b/dom/identity/nsDOMIdentity.js
@@ -665,17 +665,17 @@ nsDOMIdentity.prototype = {
     message.audience = _audience;
 
     this._log("DOMIdentityMessage: " + JSON.stringify(message));
 
     return message;
   },
 
   uninit: function DOMIdentity_uninit() {
-    this._log("nsDOMIdentity uninit()");
+    this._log("nsDOMIdentity uninit() " + this._id);
     this._identityInternal._mm.sendAsyncMessage(
       "Identity:RP:Unwatch",
       { id: this._id }
     );
   }
 
 };
 
--- a/dom/identity/tests/mochitest/file_declareAudience.html
+++ b/dom/identity/tests/mochitest/file_declareAudience.html
@@ -36,16 +36,20 @@
 
   onmessage = function(event) {
     navigator.mozId.watch({
       wantIssuer: "firefox-accounts",
       audience: event.data.audience,
       onready: onready,
       onlogin: onlogin,
       onerror: onerror,
-      onlogout: function() {},
+
+      // onlogout will actually be called every time watch() is invoked,
+      // because fxa will find no signed-in user and so trigger logout.
+      // For this test, though, we don't care and just ignore logout.
+      onlogout: function () {},
     });
   };
 
 </script>
 </div>
 </body>
 </html>
--- a/dom/identity/tests/mochitest/test_declareAudience.html
+++ b/dom/identity/tests/mochitest/test_declareAudience.html
@@ -29,17 +29,23 @@ Components.utils.import("resource://gre/
 is("appStatus" in document.nodePrincipal, true,
    "appStatus should be present in nsIPrincipal, if not the rest of this test will fail");
 
 // Mock the Firefox Accounts manager to generate a keypair and provide a fake
 // cert for the caller on each getAssertion request.
 function MockFXAManager() {}
 
 MockFXAManager.prototype = {
-  getAssertion: function(audience) {
+  getAssertion: function(audience, options) {
+    // Always reject a request for a silent assertion, simulating the
+    // scenario in which there is no signed-in user to begin with.
+    if (options.silent) {
+      return Promise.resolve(null);
+    }
+
     let deferred = Promise.defer();
     jwcrypto.generateKeyPair("DS160", (err, kp) => {
       if (err) {
         return deferred.reject(err);
       }
       jwcrypto.generateAssertion("fake-cert", kp, audience, (err, assertion) => {
         if (err) {
           return deferred.reject(err);
@@ -132,17 +138,17 @@ let testRunner = runTest();
 // have more than one message from the onerror handler in the client.  So we keep
 // track of received errors; once they reach the expected count, we are done.
 function receiveMessage(event) {
   let result = JSON.parse(event.data);
   let app = apps[appIndex];
   let expected = app.expected;
 
   is(result.success, expected.success,
-     "Assertion request " + (expected.success ? "succeeds" : "fails"));
+     "Assertion request succeeds");
 
   if (expected.success) {
     // Confirm that the assertion audience and origin are as expected
     let components = extractAssertionComponents(result.backedAssertion);
     is(components.payload.aud, app.wantAudience || app.origin,
        "Got desired assertion audience");
 
   } else {
@@ -175,17 +181,16 @@ function receiveMessage(event) {
   }
 }
 
 window.addEventListener("message", receiveMessage, false, true);
 
 function runTest() {
   for (let app of apps) {
     dump("** Testing " + app.title + "\n");
-
     // Set up state for message handler
     expectedErrors = 0;
     receivedErrors = [];
     if (!app.expected.success) {
       expectedErrors += 1;
     }
     if (app.expected.underprivileged) {
       expectedErrors += 1;
--- a/dom/identity/tests/mochitest/test_syntheticEvents.html
+++ b/dom/identity/tests/mochitest/test_syntheticEvents.html
@@ -26,17 +26,20 @@ Components.utils.import("resource://gre/
 Components.utils.import("resource://gre/modules/identity/jwcrypto.jsm");
 Components.utils.import("resource://gre/modules/identity/FirefoxAccounts.jsm");
 
 // Mock the Firefox Accounts manager to give a dummy assertion, just to confirm
 // that we're making the trip through the dom/identity and toolkit/identity
 // plumbing.
 function MockFXAManager() {}
 MockFXAManager.prototype = {
-  getAssertion: function() {
+  getAssertion: function(audience, options) {
+    if (options.silent) {
+      return Promise.resolve(null);
+    }
     return Promise.resolve("here~you.go.dude");
   }
 };
 
 let originalManager = FirefoxAccounts.fxAccountsManager;
 FirefoxAccounts.fxAccountsManager = new MockFXAManager();
 
 // Mock IdentityService (Persona) so we can test request() while not handling
--- a/dom/media/tests/mochitest/pc.js
+++ b/dom/media/tests/mochitest/pc.js
@@ -1589,18 +1589,18 @@ PeerConnectionWrapper.prototype = {
 
     // Use spec way of enumerating stats
     var counters = {};
     for (var key in stats) {
       if (stats.hasOwnProperty(key)) {
         var res = stats[key];
         // validate stats
         ok(res.id == key, "Coherent stats id");
-        var nowish = Date.now() + 10000;        // TODO: severe drift observed
-        var minimum = this.whenCreated - 10000; // on Windows XP (Bug 979649)
+        var nowish = Date.now() + 1000;        // TODO: clock drift observed
+        var minimum = this.whenCreated - 1000; // on Windows XP (Bug 979649)
         ok(res.timestamp >= minimum,
            "Valid " + (res.isRemote? "rtcp" : "rtp") + " timestamp " +
                res.timestamp + " >= " + minimum + " (" +
                (res.timestamp - minimum) + " ms)");
         ok(res.timestamp <= nowish,
            "Valid " + (res.isRemote? "rtcp" : "rtp") + " timestamp " +
                res.timestamp + " <= " + nowish + " (" +
                (res.timestamp - nowish) + " ms)");
@@ -1626,22 +1626,20 @@ PeerConnectionWrapper.prototype = {
               }
               if (res.remoteId) {
                 var rem = stats[res.remoteId];
                 ok(rem.isRemote, "Remote is rtcp");
                 ok(rem.remoteId == res.id, "Remote backlink match");
                 if(res.type == "outboundrtp") {
                   ok(rem.type == "inboundrtp", "Rtcp is inbound");
                   ok(rem.packetsReceived !== undefined, "Rtcp packetsReceived");
-                  // TODO: Re-enable once Bug 980497 is fixed
-                  // ok(rem.packetsReceived <= res.packetsSent, "No more than sent");
+                  ok(rem.packetsReceived <= res.packetsSent, "No more than sent");
                   ok(rem.packetsLost !== undefined, "Rtcp packetsLost");
                   ok(rem.bytesReceived >= rem.packetsReceived * 8, "Rtcp bytesReceived");
-                  // TODO: Re-enable once Bug 980497 is fixed
-                  // ok(rem.bytesReceived <= res.bytesSent, "No more than sent bytes");
+                  ok(rem.bytesReceived <= res.bytesSent, "No more than sent bytes");
                   ok(rem.jitter !== undefined, "Rtcp jitter");
                 } else {
                   ok(rem.type == "outboundrtp", "Rtcp is outbound");
                   ok(rem.packetsSent !== undefined, "Rtcp packetsSent");
                   // We may have received more than outdated Rtcp packetsSent
                   ok(rem.bytesSent >= rem.packetsSent * 8, "Rtcp bytesSent");
                 }
                 ok(rem.ssrc == res.ssrc, "Remote ssrc match");
--- a/dom/network/src/NetworkStatsService.jsm
+++ b/dom/network/src/NetworkStatsService.jsm
@@ -36,16 +36,20 @@ const NETWORK_STATUS_READY   = 0;
 // enabled 3G since boot).
 const NETWORK_STATUS_STANDBY = 1;
 // Network is not present, but stored in database by the previous connections.
 const NETWORK_STATUS_AWAY    = 2;
 
 // The maximum traffic amount can be saved in the |cachedStats|.
 const MAX_CACHED_TRAFFIC = 500 * 1000 * 1000; // 500 MB
 
+const QUEUE_TYPE_UPDATE_STATS = 0;
+const QUEUE_TYPE_UPDATE_CACHE = 1;
+const QUEUE_TYPE_WRITE_CACHE = 2;
+
 XPCOMUtils.defineLazyServiceGetter(this, "ppmm",
                                    "@mozilla.org/parentprocessmessagemanager;1",
                                    "nsIMessageListenerManager");
 
 XPCOMUtils.defineLazyServiceGetter(this, "gRil",
                                    "@mozilla.org/ril;1",
                                    "nsIRadioInterfaceLayer");
 
@@ -393,17 +397,16 @@ this.NetworkStatsService = {
    * Function called from manager to get stats from database.
    * In order to return updated stats, a call to the updateAllStats
    * function is performed first, which gets the latest stats from netd
    * and updates the database.
    * Then, depending on the request (stats per appId or total stats),
    * they are retrieved from the database and returned to the manager.
    */
   getSamples: function getSamples(mm, msg) {
-    let self = this;
     let network = msg.network;
     let netId = this.getNetworkId(network.id, network.type);
 
     let appId = 0;
     let appManifestURL = msg.appManifestURL;
     if (appManifestURL) {
       appId = appsService.getAppLocalIdByManifestURL(appManifestURL);
 
@@ -415,46 +418,54 @@ this.NetworkStatsService = {
       }
     }
 
     let serviceType = msg.serviceType || "";
 
     let start = new Date(msg.start);
     let end = new Date(msg.end);
 
+    let callback = (function (aError, aResult) {
+      this._db.find(function onStatsFound(aError, aResult) {
+        mm.sendAsyncMessage("NetworkStats:Get:Return",
+                            { id: msg.id, error: aError, result: aResult });
+      }, appId, serviceType, network, start, end, appManifestURL);
+    }).bind(this);
+
     this.validateNetwork(network, function onValidateNetwork(aNetId) {
       if (!aNetId) {
         mm.sendAsyncMessage("NetworkStats:Get:Return",
                             { id: msg.id, error: "Invalid connectionType", result: null });
         return;
       }
 
       // If network is currently active we need to update the cached stats first before
       // retrieving stats from the DB.
-      if (self._networks[aNetId].status == NETWORK_STATUS_READY) {
-        self.updateStats(aNetId, function onStatsUpdated(aResult, aMessage) {
-          debug("getstats for network " + network.id + " of type " + network.type);
-          debug("appId: " + appId + " from appManifestURL: " + appManifestURL);
+      if (this._networks[aNetId].status == NETWORK_STATUS_READY) {
+        debug("getstats for network " + network.id + " of type " + network.type);
+        debug("appId: " + appId + " from appManifestURL: " + appManifestURL);
+        debug("serviceType: " + serviceType);
 
-          self.updateCachedStats(function onStatsUpdated(aResult, aMessage) {
-            self._db.find(function onStatsFound(aError, aResult) {
-              mm.sendAsyncMessage("NetworkStats:Get:Return",
-                                  { id: msg.id, error: aError, result: aResult });
-            }, appId, serviceType, network, start, end, appManifestURL);
-          });
-        });
+        if (appId || serviceType) {
+          this.updateCachedStats(callback);
+          return;
+        }
+
+        this.updateStats(aNetId, function onStatsUpdated(aResult, aMessage) {
+          this.updateCachedStats(callback);
+        }.bind(this));
         return;
       }
 
       // Network not active, so no need to update
-      self._db.find(function onStatsFound(aError, aResult) {
+      this._db.find(function onStatsFound(aError, aResult) {
         mm.sendAsyncMessage("NetworkStats:Get:Return",
                             { id: msg.id, error: aError, result: aResult });
       }, appId, serviceType, network, start, end, appManifestURL);
-    });
+    }.bind(this));
   },
 
   clearInterfaceStats: function clearInterfaceStats(mm, msg) {
     let self = this;
     let network = msg.network;
 
     debug("clear stats for network " + network.id + " of type " + network.type);
 
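
The reworked getSamples above picks one of three paths before reading the database: inactive networks query the DB directly, per-app or per-service requests only need the cache flushed, and interface totals still refresh from netd first. A minimal standalone sketch of that branching, with db, updateStats and updateCachedStats as hypothetical stand-ins:

// Sketch only: getStatsFor() mirrors the branching in getSamples() above.
// db, updateStats and updateCachedStats are hypothetical stand-ins.
const db = {
  find(callback) { callback(null, { rxBytes: 0, txBytes: 0 }); },
};

function updateStats(netId, callback) { callback(true, "netd updated"); }
function updateCachedStats(callback) { callback(true, "cache flushed"); }

function getStatsFor(networkActive, appId, serviceType, reply) {
  const queryDb = () => db.find((error, result) => reply({ error, result }));

  if (!networkActive) {
    // Inactive network: nothing new to pull, read the DB directly.
    queryDb();
    return;
  }
  if (appId || serviceType) {
    // Per-app / per-service stats only live in the cache, so flushing
    // the cache is enough before querying.
    updateCachedStats(queryDb);
    return;
  }
  // Interface totals: refresh from netd first, then flush the cache.
  updateStats(0, () => updateCachedStats(queryDb));
}

getStatsFor(true, 42, "", console.log);
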
@@ -510,85 +521,89 @@ this.NetworkStatsService = {
           mm.sendAsyncMessage("NetworkStats:ClearAll:Return",
                               { id: msg.id, error: aError, result: aResult });
         });
       });
     });
   },
 
   updateAllStats: function updateAllStats(aCallback) {
-    // Update |cachedStats|.
-    this.updateCachedStats();
-
     let elements = [];
     let lastElement = null;
+    let callback = (function (success, message) {
+      this.updateCachedStats(aCallback);
+    }).bind(this);
 
     // For each connectionType create an object containing the type
     // and the 'queueIndex', the 'queueIndex' is an integer representing
     // the index of a connection type in the global queue array. So, if
     // the connection type is already in the queue it is not appended again,
     // else it is pushed in 'elements' array, which later will be pushed to
     // the queue array.
     for (let netId in this._networks) {
       if (this._networks[netId].status != NETWORK_STATUS_READY) {
         continue;
       }
 
       lastElement = { netId: netId,
-                      queueIndex: this.updateQueueIndex(netId)};
+                      queueIndex: this.updateQueueIndex(netId) };
 
       if (lastElement.queueIndex == -1) {
-        elements.push({ netId: lastElement.netId, callbacks: [] });
+        elements.push({ netId:     lastElement.netId,
+                        callbacks: [],
+                        queueType: QUEUE_TYPE_UPDATE_STATS });
       }
     }
 
     if (!lastElement) {
       // No elements need to be updated, probably because status is different than
       // NETWORK_STATUS_READY.
       if (aCallback) {
         aCallback(true, "OK");
       }
       return;
     }
 
     if (elements.length > 0) {
       // If length of elements is greater than 0, callback is set to
       // the last element.
-      elements[elements.length - 1].callbacks.push(aCallback);
+      elements[elements.length - 1].callbacks.push(callback);
       this.updateQueue = this.updateQueue.concat(elements);
     } else {
       // Else, it means that all connection types are already in the queue to
       // be updated, so callback for this request is added to
       // the element in the main queue with the index of the last 'lastElement'.
       // But before is checked that element is still in the queue because it can
       // be processed while generating 'elements' array.
       let element = this.updateQueue[lastElement.queueIndex];
       if (aCallback &&
          (!element || element.netId != lastElement.netId)) {
         aCallback();
         return;
       }
 
-      this.updateQueue[lastElement.queueIndex].callbacks.push(aCallback);
+      this.updateQueue[lastElement.queueIndex].callbacks.push(callback);
     }
 
     // Call the function that process the elements of the queue.
     this.processQueue();
 
     if (DEBUG) {
       this.logAllRecords();
     }
   },
 
   updateStats: function updateStats(aNetId, aCallback) {
     // Check if the connection is in the main queue, push a new element
     // if it is not being processed or add a callback if it is.
     let index = this.updateQueueIndex(aNetId);
     if (index == -1) {
-      this.updateQueue.push({netId: aNetId, callbacks: [aCallback]});
+      this.updateQueue.push({ netId: aNetId,
+                              callbacks: [aCallback],
+                              queueType: QUEUE_TYPE_UPDATE_STATS });
     } else {
       this.updateQueue[index].callbacks.push(aCallback);
       return;
     }
 
     // Call the function that process the elements of the queue.
     this.processQueue();
   },
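
The queue handling in updateAllStats/updateStats above guarantees at most one pending entry per network: a second request for the same netId only appends its callback to the existing entry. A minimal self-contained sketch of that coalescing, using the same item shape ({ netId, callbacks, queueType }) but with a simplified updateQueueIndex helper:

const QUEUE_TYPE_UPDATE_STATS = 0;

const updateQueue = [];

// Return the index of a pending entry for netId, or -1 (cf. updateQueueIndex).
function updateQueueIndex(netId) {
  return updateQueue.findIndex(item => item.netId === netId);
}

// Enqueue an update for netId, or just attach the callback if one is pending.
function enqueueUpdate(netId, callback) {
  const index = updateQueueIndex(netId);
  if (index === -1) {
    updateQueue.push({ netId, callbacks: [callback],
                       queueType: QUEUE_TYPE_UPDATE_STATS });
  } else {
    updateQueue[index].callbacks.push(callback);
  }
}

enqueueUpdate(1, () => console.log("first caller's callback"));
enqueueUpdate(1, () => console.log("second caller's callback"));
console.log(updateQueue.length, updateQueue[0].callbacks.length); // 1 2
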
@@ -606,41 +621,49 @@ this.NetworkStatsService = {
    */
   processQueue: function processQueue(aResult, aMessage) {
     // If aResult is not undefined, the function is being called back with the
     // result of processing an element, so remove that element and invoke the
     // callbacks it has.
     if (aResult != undefined) {
       let item = this.updateQueue.shift();
       for (let callback of item.callbacks) {
-        if(callback) {
+        if (callback) {
           callback(aResult, aMessage);
         }
       }
     } else {
       // The caller is a function that has pushed new elements to the queue;
       // if isQueueRunning is false, it means there is no processing currently
       // being done, so start.
       if (this.isQueueRunning) {
-        if(this.updateQueue.length > 1) {
-          return;
-        }
+        return;
       } else {
         this.isQueueRunning = true;
       }
     }
 
     // Check length to determine if queue is empty and stop processing.
     if (this.updateQueue.length < 1) {
       this.isQueueRunning = false;
       return;
     }
 
     // Call the update function for the next element.
-    this.update(this.updateQueue[0].netId, this.processQueue.bind(this));
+    switch (this.updateQueue[0].queueType) {
+      case QUEUE_TYPE_UPDATE_STATS:
+        this.update(this.updateQueue[0].netId, this.processQueue.bind(this));
+        break;
+      case QUEUE_TYPE_UPDATE_CACHE:
+        this.updateCache(this.processQueue.bind(this));
+        break;
+      case QUEUE_TYPE_WRITE_CACHE:
+        this.writeCache(this.updateQueue[0].stats, this.processQueue.bind(this));
+        break;
+    }
   },
 
   update: function update(aNetId, aCallback) {
     // Check if connection type is valid.
     if (!this._networks[aNetId]) {
       if (aCallback) {
         aCallback(false, "Invalid network " + aNetId);
       }
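
With the queueType field added above, processQueue acts as a single serialized dispatcher: it starts the worker for the head item, and the worker reports back through processQueue itself, which pops the item, notifies its callbacks, and moves on to the next one. A small standalone sketch of that loop, with update/updateCache/writeCache reduced to stub workers:

const QUEUE_TYPE_UPDATE_STATS = 0;
const QUEUE_TYPE_UPDATE_CACHE = 1;
const QUEUE_TYPE_WRITE_CACHE  = 2;

const queue = [];
let running = false;

// Stub workers standing in for update(), updateCache() and writeCache().
const workers = {
  [QUEUE_TYPE_UPDATE_STATS]: (item, done) => done(true, "stats " + item.netId),
  [QUEUE_TYPE_UPDATE_CACHE]: (item, done) => done(true, "cache flushed"),
  [QUEUE_TYPE_WRITE_CACHE]:  (item, done) => done(true, "cache written"),
};

function processQueue(result, message) {
  if (result !== undefined) {
    // Called back by a worker: pop the finished item and notify its callbacks.
    const item = queue.shift();
    for (const callback of item.callbacks) {
      if (callback) callback(result, message);
    }
  } else if (running) {
    // Someone enqueued work while the loop is already draining the queue.
    return;
  } else {
    running = true;
  }

  if (queue.length < 1) {
    running = false;
    return;
  }
  // Dispatch the head of the queue by its type; the worker re-enters
  // processQueue when it is done, which advances to the next item.
  workers[queue[0].queueType](queue[0], processQueue);
}

queue.push({ netId: 0, callbacks: [console.log], queueType: QUEUE_TYPE_UPDATE_STATS });
queue.push({ callbacks: [console.log], queueType: QUEUE_TYPE_UPDATE_CACHE });
processQueue();
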
@@ -703,24 +726,21 @@ this.NetworkStatsService = {
    * Function responsible for receiving stats which are not from netd.
    */
   saveStats: function saveStats(aAppId, aServiceType, aNetwork, aTimeStamp,
                                 aRxBytes, aTxBytes, aIsAccumulative,
                                 aCallback) {
     let netId = this.convertNetworkInterface(aNetwork);
     if (!netId) {
       if (aCallback) {
-        aCallback.notify(false, "Invalid network type");
+        aCallback(false, "Invalid network type");
       }
       return;
     }
 
-    debug("saveStats: " + aAppId + " " + aServiceType + " " + netId + " " +
-          aTimeStamp + " " + aRxBytes + " " + aTxBytes);
-
     // Check if |aConnectionType|, |aAppId| and |aServiceType| are valid.
     // There are two invalid cases for the combination of |aAppId| and
     // |aServiceType|:
     // a. Both |aAppId| is non-zero and |aServiceType| is non-empty.
     // b. Both |aAppId| is zero and |aServiceType| is empty.
     if (!this._networks[netId] || (aAppId && aServiceType) ||
         (!aAppId && !aServiceType)) {
       debug("Invalid network interface, appId or serviceType");
@@ -731,114 +751,128 @@ this.NetworkStatsService = {
                   serviceType:    aServiceType,
                   networkId:      this._networks[netId].network.id,
                   networkType:    this._networks[netId].network.type,
                   date:           new Date(aTimeStamp),
                   rxBytes:        aRxBytes,
                   txBytes:        aTxBytes,
                   isAccumulative: aIsAccumulative };
 
+    this.updateQueue.push({ stats: stats,
+                            callbacks: [aCallback],
+                            queueType: QUEUE_TYPE_WRITE_CACHE });
+
+    this.processQueue();
+  },
+
+  /*
+   * Writes the given stats into |cachedStats|, accumulating traffic per
+   * appId/serviceType/netId and flushing to the database when needed.
+   */
+  writeCache: function writeCache(aStats, aCallback) {
+    debug("saveStats: " + aStats.appId + " " + aStats.serviceType + " " +
+          aStats.networkId + " " + aStats.networkType + " " + aStats.date + " "
+          + aStats.date + " " + aStats.rxBytes + " " + aStats.txBytes);
+
     // Generate a unique key from |appId|, |serviceType| and |netId|,
     // which is used to retrieve data in |cachedStats|.
-    let key = stats.appId + "" + stats.serviceType + "" + netId;
+    let netId = this.getNetworkId(aStats.networkId, aStats.networkType);
+    let key = aStats.appId + "" + aStats.serviceType + "" + netId;
 
     // |cachedStats| only keeps the data with the same date.
     // If the incoming date is different from |cachedStatsDate|,
     // both |cachedStats| and |cachedStatsDate| will get updated.
-    let diff = (this._db.normalizeDate(stats.date) -
+    let diff = (this._db.normalizeDate(aStats.date) -
                 this._db.normalizeDate(this.cachedStatsDate)) /
                this._db.sampleRate;
     if (diff != 0) {
-      this.updateCachedStats(function onUpdated(success, message) {
-        this.cachedStatsDate = stats.date;
-        this.cachedStats[key] = stats;
-
-        if (!aCallback) {
-          return;
-        }
+      this.updateCache(function onUpdated(success, message) {
+        this.cachedStatsDate = aStats.date;
+        this.cachedStats[key] = aStats;
 
-        if (!success) {
-          aCallback.notify(false, message);
-          return;
+        if (aCallback) {
+          aCallback(true, "ok");
         }
-
-        aCallback.notify(true, "ok");
       }.bind(this));
-
       return;
     }
 
     // Try to find the matched row in the cache by |appId| and |connectionType|.
     // If not found, save the incoming data into the cache.
     let cachedStats = this.cachedStats[key];
     if (!cachedStats) {
-      this.cachedStats[key] = stats;
+      this.cachedStats[key] = aStats;
+      if (aCallback) {
+        aCallback(true, "ok");
+      }
       return;
     }
 
     // Find matched row, accumulate the traffic amount.
-    cachedStats.rxBytes += stats.rxBytes;
-    cachedStats.txBytes += stats.txBytes;
+    cachedStats.rxBytes += aStats.rxBytes;
+    cachedStats.txBytes += aStats.txBytes;
 
     // If new rxBytes or txBytes exceeds MAX_CACHED_TRAFFIC
     // the corresponding row will be saved to indexedDB.
     // Then, the row will be removed from the cache.
     if (cachedStats.rxBytes > MAX_CACHED_TRAFFIC ||
         cachedStats.txBytes > MAX_CACHED_TRAFFIC) {
-      this._db.saveStats(cachedStats,
-        function (error, result) {
-          debug("Application stats inserted in indexedDB");
+      this._db.saveStats(cachedStats, function (error, result) {
+        debug("Application stats inserted in indexedDB");
+        if (aCallback) {
+          aCallback(true, "ok");
         }
-      );
+      });
       delete this.cachedStats[key];
+      return;
+    }
+
+    if (aCallback) {
+      aCallback(true, "ok");
     }
   },
 
   updateCachedStats: function updateCachedStats(aCallback) {
-    debug("updateCachedStats: " + this.cachedStatsDate);
+    this.updateQueue.push({ callbacks: [aCallback],
+                            queueType: QUEUE_TYPE_UPDATE_CACHE });
+
+    this.processQueue();
+  },
+
+  updateCache: function updateCache(aCallback) {
+    debug("updateCache: " + this.cachedStatsDate);
 
     let stats = Object.keys(this.cachedStats);
     if (stats.length == 0) {
       // |cachedStats| is empty, no need to update.
       if (aCallback) {
         aCallback(true, "no need to update");
       }
-
       return;
     }
 
     let index = 0;
     this._db.saveStats(this.cachedStats[stats[index]],
-      function onSavedStats(error, result) {
-        if (DEBUG) {
-          debug("Application stats inserted in indexedDB");
-        }
+                       function onSavedStats(error, result) {
+      debug("Application stats inserted in indexedDB");
 
-        // Clean up the |cachedStats| after updating.
-        if (index == stats.length - 1) {
-          this.cachedStats = Object.create(null);
-
-          if (!aCallback) {
-            return;
-          }
+      // Clean up the |cachedStats| after updating.
+      if (index == stats.length - 1) {
+        this.cachedStats = Object.create(null);
 
-          if (error) {
-            aCallback(false, error);
-            return;
-          }
-
+        if (aCallback) {
           aCallback(true, "ok");
-          return;
         }
+        return;
+      }
 
-        // Update is not finished, keep updating.
-        index += 1;
-        this._db.saveStats(this.cachedStats[stats[index]],
-                           onSavedStats.bind(this, error, result));
-      }.bind(this));
+      // Update is not finished, keep updating.
+      index += 1;
+      this._db.saveStats(this.cachedStats[stats[index]],
+                         onSavedStats.bind(this, error, result));
+    }.bind(this));
   },
 
   get maxCachedTraffic () {
     return MAX_CACHED_TRAFFIC;
   },
 
   logAllRecords: function logAllRecords() {
     this._db.logAllRecords(function onResult(aError, aResult) {
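
writeCache and updateCache above keep per-app/per-service samples in an in-memory cache keyed by appId + serviceType + netId, accumulating traffic until a row crosses MAX_CACHED_TRAFFIC and gets spilled to the database. A compact sketch of that accumulate-then-flush behaviour (date-rollover handling omitted), with saveToDb as a hypothetical stand-in for this._db.saveStats:

const MAX_CACHED_TRAFFIC = 500 * 1000 * 1000; // 500 MB, as in the service

const cachedStats = {};

// Hypothetical stand-in for this._db.saveStats().
function saveToDb(stats) {
  console.log("flushed to DB:", stats);
}

function writeCache(stats) {
  // Same key shape as the service: appId + serviceType + netId.
  const key = stats.appId + "" + stats.serviceType + "" + stats.netId;

  const cached = cachedStats[key];
  if (!cached) {
    cachedStats[key] = stats;
    return;
  }

  // Matched row found: accumulate traffic in memory.
  cached.rxBytes += stats.rxBytes;
  cached.txBytes += stats.txBytes;

  // Spill to the database once the cached amount gets too large.
  if (cached.rxBytes > MAX_CACHED_TRAFFIC || cached.txBytes > MAX_CACHED_TRAFFIC) {
    saveToDb(cached);
    delete cachedStats[key];
  }
}

writeCache({ appId: 1, serviceType: "", netId: "00", rxBytes: 10, txBytes: 20 });
writeCache({ appId: 1, serviceType: "", netId: "00", rxBytes: 10, txBytes: 20 });
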
--- a/dom/network/src/NetworkStatsServiceProxy.js
+++ b/dom/network/src/NetworkStatsServiceProxy.js
@@ -39,16 +39,20 @@ NetworkStatsServiceProxy.prototype = {
       return;
     }
 
     if (DEBUG) {
       debug("saveAppStats: " + aAppId + " " + aNetwork.type + " " + aTimeStamp +
             " " + aRxBytes + " " + aTxBytes + " " + aIsAccumulative);
     }
 
+    if (aCallback) {
+      aCallback = aCallback.notify;
+    }
+
     NetworkStatsService.saveStats(aAppId, "", aNetwork, aTimeStamp,
                                   aRxBytes, aTxBytes, aIsAccumulative,
                                   aCallback);
   },
 
   /*
    * Function called in the points of different system services
    * to pass the per-service stats to NetworkStatsService.
@@ -64,16 +68,20 @@ NetworkStatsServiceProxy.prototype = {
     }
 
     if (DEBUG) {
       debug("saveServiceStats: " + aServiceType + " " + aNetwork.type + " " +
             aTimeStamp + " " + aRxBytes + " " + aTxBytes + " " +
             aIsAccumulative);
     }
 
+    if (aCallback) {
+      aCallback = aCallback.notify;
+    }
+
     NetworkStatsService.saveStats(0, aServiceType, aNetwork, aTimeStamp,
                                   aRxBytes, aTxBytes, aIsAccumulative,
                                   aCallback);
   },
 
   classID : NETWORKSTATSSERVICEPROXY_CID,
   QueryInterface : XPCOMUtils.generateQI([nsINetworkStatsServiceProxy]),
 }
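
The proxy now unwraps the XPCOM callback's notify method into a plain function before forwarding it, so NetworkStatsService can invoke every callback the same way whether it came from the proxy or from internal callers. A tiny sketch of that adapter, with a made-up saveStats on the service side:

// The service side only ever calls a plain function (cf. saveStats).
function saveStats(callback) {
  if (callback) {
    callback(true, "ok");
  }
}

// Internal callers already pass plain functions.
saveStats((success, message) => console.log("internal:", success, message));

// External callers hand in an nsINetworkStatsServiceProxyCallback-like object;
// the proxy unwraps its notify method so the service sees a function too.
const xpcomCallback = {
  notify(success, message) { console.log("proxy:", success, message); },
};
let callback = xpcomCallback;
if (callback) {
  callback = callback.notify;
}
saveStats(callback);
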
--- a/dom/network/tests/unit_stats/test_networkstats_service.js
+++ b/dom/network/tests/unit_stats/test_networkstats_service.js
@@ -2,16 +2,18 @@
    http://creativecommons.org/publicdomain/zero/1.0/ */
 
 const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
 
 const NETWORK_STATUS_READY   = 0;
 const NETWORK_STATUS_STANDBY = 1;
 const NETWORK_STATUS_AWAY    = 2;
 
+const QUEUE_TYPE_UPDATE_STATS = 0;
+
 var wifiId = '00';
 
 function getNetworks(callback) {
   NetworkStatsService._db.getAvailableNetworks(function onGetNetworks(aError, aResult) {
     callback(aError, aResult);
   });
 }
 
@@ -75,21 +77,21 @@ add_test(function test_update() {
     NetworkStatsService.update(netId, function (success, msg) {
       do_check_eq(success, true);
       run_next_test();
     });
   });
 });
 
 add_test(function test_updateQueueIndex() {
-  NetworkStatsService.updateQueue = [{netId: 0, callbacks: null},
-                                     {netId: 1, callbacks: null},
-                                     {netId: 2, callbacks: null},
-                                     {netId: 3, callbacks: null},
-                                     {netId: 4, callbacks: null}];
+  NetworkStatsService.updateQueue = [{netId: 0, callbacks: null, queueType: QUEUE_TYPE_UPDATE_STATS},
+                                     {netId: 1, callbacks: null, queueType: QUEUE_TYPE_UPDATE_STATS},
+                                     {netId: 2, callbacks: null, queueType: QUEUE_TYPE_UPDATE_STATS},
+                                     {netId: 3, callbacks: null, queueType: QUEUE_TYPE_UPDATE_STATS},
+                                     {netId: 4, callbacks: null, queueType: QUEUE_TYPE_UPDATE_STATS}];
   var index = NetworkStatsService.updateQueueIndex(3);
   do_check_eq(index, 3);
   index = NetworkStatsService.updateQueueIndex(10);
   do_check_eq(index, -1);
 
   NetworkStatsService.updateQueue = [];
   run_next_test();
 });
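
The queue entries in test_updateQueueIndex now carry an explicit queueType, matching the richer request objects the service queues internally. The lookup the assertions rely on is, in effect, a linear scan by netId; a minimal sketch of that behaviour (an assumption for illustration, not the service's actual implementation):

    const QUEUE_TYPE_UPDATE_STATS = 0;

    function updateQueueIndex(queue, netId) {
      for (var i = 0; i < queue.length; i++) {
        if (queue[i].netId == netId) {
          return i;        // position of the queued request for that network
        }
      }
      return -1;           // nothing queued for that netId
    }

    var queue = [{netId: 0, callbacks: null, queueType: QUEUE_TYPE_UPDATE_STATS},
                 {netId: 3, callbacks: null, queueType: QUEUE_TYPE_UPDATE_STATS}];
    // updateQueueIndex(queue, 3) == 1; updateQueueIndex(queue, 10) == -1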
--- a/dom/network/tests/unit_stats/test_networkstats_service_proxy.js
+++ b/dom/network/tests/unit_stats/test_networkstats_service_proxy.js
@@ -31,181 +31,170 @@ function mokConvertNetworkInterface() {
 
     return netId;
   };
 }
 
 add_test(function test_saveAppStats() {
   var cachedStats = NetworkStatsService.cachedStats;
   var timestamp = NetworkStatsService.cachedStatsDate.getTime();
-  var samples = 5;
 
   // Create two fake nsINetworkInterfaces. As nsINetworkInterface cannot
   // be instantiated, these two vars will emulate it by filling the properties
   // that will be used.
   var wifi = {type: Ci.nsINetworkInterface.NETWORK_TYPE_WIFI, id: "0"};
   var mobile = {type: Ci.nsINetworkInterface.NETWORK_TYPE_MOBILE, id: "1234"};
 
   // Insert fake mobile network interface in NetworkStatsService
   var mobileNetId = NetworkStatsService.getNetworkId(mobile.id, mobile.type);
 
   do_check_eq(Object.keys(cachedStats).length, 0);
 
-  for (var i = 0; i < samples; i++) {
-    nssProxy.saveAppStats(1, wifi, timestamp, 10, 20, false);
-
-    nssProxy.saveAppStats(1, mobile, timestamp, 10, 20, false);
-  }
-
-  var key1 = 1 + "" + NetworkStatsService.getNetworkId(wifi.id, wifi.type);
-  var key2 = 1 + "" + mobileNetId + "";
+  nssProxy.saveAppStats(1, wifi, timestamp, 10, 20, false,
+                        function (success, message) {
+    do_check_eq(success, true);
+    nssProxy.saveAppStats(1, mobile, timestamp, 10, 20, false,
+                          function (success, message) {
+      var key1 = 1 + "" + NetworkStatsService.getNetworkId(wifi.id, wifi.type);
+      var key2 = 1 + "" + mobileNetId + "";
 
-  do_check_eq(Object.keys(cachedStats).length, 2);
-  do_check_eq(cachedStats[key1].appId, 1);
-  do_check_eq(cachedStats[key1].serviceType.length, 0);
-  do_check_eq(cachedStats[key1].networkId, wifi.id);
-  do_check_eq(cachedStats[key1].networkType, wifi.type);
-  do_check_eq(new Date(cachedStats[key1].date).getTime() / 1000,
-              Math.floor(timestamp / 1000));
-  do_check_eq(cachedStats[key1].rxBytes, 50);
-  do_check_eq(cachedStats[key1].txBytes, 100);
-  do_check_eq(cachedStats[key2].appId, 1);
-  do_check_eq(cachedStats[key1].serviceType.length, 0);
-  do_check_eq(cachedStats[key2].networkId, mobile.id);
-  do_check_eq(cachedStats[key2].networkType, mobile.type);
-  do_check_eq(new Date(cachedStats[key2].date).getTime() / 1000,
-              Math.floor(timestamp / 1000));
-  do_check_eq(cachedStats[key2].rxBytes, 50);
-  do_check_eq(cachedStats[key2].txBytes, 100);
+      do_check_eq(Object.keys(cachedStats).length, 2);
+      do_check_eq(cachedStats[key1].appId, 1);
+      do_check_eq(cachedStats[key1].serviceType.length, 0);
+      do_check_eq(cachedStats[key1].networkId, wifi.id);
+      do_check_eq(cachedStats[key1].networkType, wifi.type);
+      do_check_eq(new Date(cachedStats[key1].date).getTime() / 1000,
+                  Math.floor(timestamp / 1000));
+      do_check_eq(cachedStats[key1].rxBytes, 10);
+      do_check_eq(cachedStats[key1].txBytes, 20);
+      do_check_eq(cachedStats[key2].appId, 1);
+      do_check_eq(cachedStats[key2].serviceType.length, 0);
+      do_check_eq(cachedStats[key2].networkId, mobile.id);
+      do_check_eq(cachedStats[key2].networkType, mobile.type);
+      do_check_eq(new Date(cachedStats[key2].date).getTime() / 1000,
+                  Math.floor(timestamp / 1000));
+      do_check_eq(cachedStats[key2].rxBytes, 10);
+      do_check_eq(cachedStats[key2].txBytes, 20);
 
-  run_next_test();
+      run_next_test();
+    });
+  });
 });
 
 add_test(function test_saveServiceStats() {
   var timestamp = NetworkStatsService.cachedStatsDate.getTime();
-  var samples = 5;
 
   // Create two fake nsINetworkInterfaces. As nsINetworkInterface cannot
   // be instantiated, these two vars will emulate it by filling the properties
   // that will be used.
   var wifi = {type: Ci.nsINetworkInterface.NETWORK_TYPE_WIFI, id: "0"};
   var mobile = {type: Ci.nsINetworkInterface.NETWORK_TYPE_MOBILE, id: "1234"};
 
   // Insert fake mobile network interface in NetworkStatsService
   var mobileNetId = NetworkStatsService.getNetworkId(mobile.id, mobile.type);
 
-  NetworkStatsService.updateCachedStats(
-    function (success, msg) {
-      do_check_eq(success, true);
+  NetworkStatsService.updateCachedStats(function (success, msg) {
+    do_check_eq(success, true);
 
-      var cachedStats = NetworkStatsService.cachedStats;
-      do_check_eq(Object.keys(cachedStats).length, 0);
+    var cachedStats = NetworkStatsService.cachedStats;
+    do_check_eq(Object.keys(cachedStats).length, 0);
 
-      var serviceType = 'FakeType';
-      for (var i = 0; i < samples; i++) {
-        nssProxy.saveServiceStats(serviceType, wifi, timestamp, 10, 20, false);
-
-        nssProxy.saveServiceStats(serviceType, mobile, timestamp, 10, 20, false);
-      }
-
-      var key1 = 0 + "" + serviceType +
-                 NetworkStatsService.getNetworkId(wifi.id, wifi.type);
-      var key2 = 0 + "" + serviceType + mobileNetId + "";
+    var serviceType = 'FakeType';
+    nssProxy.saveServiceStats(serviceType, wifi, timestamp, 10, 20, false,
+                              function (success, message) {
+      do_check_eq(success, true);
+      nssProxy.saveServiceStats(serviceType, mobile, timestamp, 10, 20, false,
+                                function (success, message) {
+        do_check_eq(success, true);
+        var key1 = 0 + "" + serviceType +
+                   NetworkStatsService.getNetworkId(wifi.id, wifi.type);
+        var key2 = 0 + "" + serviceType + mobileNetId + "";
 
-      do_check_eq(Object.keys(cachedStats).length, 2);
-      do_check_eq(cachedStats[key1].appId, 0);
-      do_check_eq(cachedStats[key1].serviceType, serviceType);
-      do_check_eq(cachedStats[key1].networkId, wifi.id);
-      do_check_eq(cachedStats[key1].networkType, wifi.type);
-      do_check_eq(new Date(cachedStats[key1].date).getTime() / 1000,
-                  Math.floor(timestamp / 1000));
-      do_check_eq(cachedStats[key1].rxBytes, 50);
-      do_check_eq(cachedStats[key1].txBytes, 100);
-      do_check_eq(cachedStats[key2].appId, 0);
-      do_check_eq(cachedStats[key1].serviceType, serviceType);
-      do_check_eq(cachedStats[key2].networkId, mobile.id);
-      do_check_eq(cachedStats[key2].networkType, mobile.type);
-      do_check_eq(new Date(cachedStats[key2].date).getTime() / 1000,
-                  Math.floor(timestamp / 1000));
-      do_check_eq(cachedStats[key2].rxBytes, 50);
-      do_check_eq(cachedStats[key2].txBytes, 100);
+        do_check_eq(Object.keys(cachedStats).length, 2);
+        do_check_eq(cachedStats[key1].appId, 0);
+        do_check_eq(cachedStats[key1].serviceType, serviceType);
+        do_check_eq(cachedStats[key1].networkId, wifi.id);
+        do_check_eq(cachedStats[key1].networkType, wifi.type);
+        do_check_eq(new Date(cachedStats[key1].date).getTime() / 1000,
+                    Math.floor(timestamp / 1000));
+        do_check_eq(cachedStats[key1].rxBytes, 10);
+        do_check_eq(cachedStats[key1].txBytes, 20);
+        do_check_eq(cachedStats[key2].appId, 0);
+        do_check_eq(cachedStats[key2].serviceType, serviceType);
+        do_check_eq(cachedStats[key2].networkId, mobile.id);
+        do_check_eq(cachedStats[key2].networkType, mobile.type);
+        do_check_eq(new Date(cachedStats[key2].date).getTime() / 1000,
+                    Math.floor(timestamp / 1000));
+        do_check_eq(cachedStats[key2].rxBytes, 10);
+        do_check_eq(cachedStats[key2].txBytes, 20);
 
-      run_next_test();
-    }
-  );
+        run_next_test();
+      });
+    });
+  });
 });
 
 add_test(function test_saveStatsWithDifferentDates() {
   var today = NetworkStatsService.cachedStatsDate;
   var tomorrow = new Date(today.getTime() + (24 * 60 * 60 * 1000));
 
-  var wifi = {type: Ci.nsINetworkInterface.NETWORK_TYPE_WIFI, id: "0"};
   var mobile = {type: Ci.nsINetworkInterface.NETWORK_TYPE_MOBILE, id: "1234"};
 
-  var key = 1 + "" + NetworkStatsService.getNetworkId(wifi.id, wifi.type);
-
-  NetworkStatsService.updateCachedStats(
-    function (success, msg) {
-      do_check_eq(success, true);
-
-      do_check_eq(Object.keys(NetworkStatsService.cachedStats).length, 0);
+  NetworkStatsService.updateCachedStats(function (success, message) {
+    do_check_eq(success, true);
 
-      nssProxy.saveAppStats(1, wifi, today.getTime(), 10, 20, false);
-
-      nssProxy.saveAppStats(1, mobile, today.getTime(), 10, 20, false);
-
-      var saveStatsCb = {
-        notify: function notify(success, message) {
-          do_check_eq(success, true);
+    do_check_eq(Object.keys(NetworkStatsService.cachedStats).length, 0);
+    nssProxy.saveAppStats(1, mobile, today.getTime(), 10, 20, false,
+                          function (success, message) {
+      do_check_eq(success, true);
+      nssProxy.saveAppStats(2, mobile, tomorrow.getTime(), 30, 40, false,
+                            function (success, message) {
+        do_check_eq(success, true);
 
-          var cachedStats = NetworkStatsService.cachedStats;
-          var key = 2 + "" +
-                    NetworkStatsService.getNetworkId(mobile.id, mobile.type);
-          do_check_eq(Object.keys(cachedStats).length, 1);
-          do_check_eq(cachedStats[key].appId, 2);
-          do_check_eq(cachedStats[key].networkId, mobile.id);
-          do_check_eq(cachedStats[key].networkType, mobile.type);
-          do_check_eq(new Date(cachedStats[key].date).getTime() / 1000,
-                      Math.floor(tomorrow.getTime() / 1000));
-          do_check_eq(cachedStats[key].rxBytes, 30);
-          do_check_eq(cachedStats[key].txBytes, 40);
+        var cachedStats = NetworkStatsService.cachedStats;
+        var key = 2 + "" +
+                  NetworkStatsService.getNetworkId(mobile.id, mobile.type);
+        do_check_eq(Object.keys(cachedStats).length, 1);
+        do_check_eq(cachedStats[key].appId, 2);
+        do_check_eq(cachedStats[key].networkId, mobile.id);
+        do_check_eq(cachedStats[key].networkType, mobile.type);
+        do_check_eq(new Date(cachedStats[key].date).getTime() / 1000,
+                    Math.floor(tomorrow.getTime() / 1000));
+        do_check_eq(cachedStats[key].rxBytes, 30);
+        do_check_eq(cachedStats[key].txBytes, 40);
 
-          run_next_test();
-        }
-      };
-
-      nssProxy.saveAppStats(2, mobile, tomorrow.getTime(), 30, 40, false,
-                            saveStatsCb);
-    }
-  );
+        run_next_test();
+      });
+    });
+  });
 });
 
 add_test(function test_saveStatsWithMaxCachedTraffic() {
   var timestamp = NetworkStatsService.cachedStatsDate.getTime();
   var maxtraffic = NetworkStatsService.maxCachedTraffic;
   var wifi = {type: Ci.nsINetworkInterface.NETWORK_TYPE_WIFI, id: "0"};
 
-  NetworkStatsService.updateCachedStats(
-    function (success, msg) {
-      do_check_eq(success, true);
-
-      var cachedStats = NetworkStatsService.cachedStats;
-      do_check_eq(Object.keys(cachedStats).length, 0);
-
-      nssProxy.saveAppStats(1, wifi, timestamp, 10, 20, false);
+  NetworkStatsService.updateCachedStats(function (success, message) {
+    do_check_eq(success, true);
 
+    var cachedStats = NetworkStatsService.cachedStats;
+    do_check_eq(Object.keys(cachedStats).length, 0);
+    nssProxy.saveAppStats(1, wifi, timestamp, 10, 20, false,
+                          function (success, message) {
+      do_check_eq(success, true);
       do_check_eq(Object.keys(cachedStats).length, 1);
-
-      nssProxy.saveAppStats(1, wifi, timestamp, maxtraffic, 20, false);
+      nssProxy.saveAppStats(1, wifi, timestamp, maxtraffic, 20, false,
+                            function (success, message) {
+        do_check_eq(success, true);
+        do_check_eq(Object.keys(cachedStats).length, 0);
 
-      do_check_eq(Object.keys(cachedStats).length, 0);
-
-      run_next_test();
-    }
-  );
+        run_next_test();
+      });
+    });
+  });
 });
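
This rewritten test now waits for each saveAppStats completion callback and checks that crossing maxCachedTraffic empties the in-memory cache, i.e. once a cached record grows past the threshold the service writes it out and starts caching afresh. A rough sketch of that policy (assumed shape, not the service's code):

    // Assumed flush-on-threshold behaviour exercised by the assertions above.
    function cacheOrFlush(cachedStats, key, rxBytes, txBytes, maxCachedTraffic, writeToDb) {
      var entry = cachedStats[key] || (cachedStats[key] = { rxBytes: 0, txBytes: 0 });
      entry.rxBytes += rxBytes;
      entry.txBytes += txBytes;
      if (entry.rxBytes > maxCachedTraffic || entry.txBytes > maxCachedTraffic) {
        writeToDb(cachedStats);                       // persist everything currently cached...
        Object.keys(cachedStats).forEach(function (k) {
          delete cachedStats[k];                      // ...then empty the cache
        });
      }
    }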
 
 function run_test() {
   do_get_profile();
 
   Cu.import("resource://gre/modules/NetworkStatsService.jsm");
 
   // Function convertNetworkInterface of NetworkStatsService causes errors when dealing
--- a/dom/webidl/RTCStatsReport.webidl
+++ b/dom/webidl/RTCStatsReport.webidl
@@ -35,16 +35,17 @@ dictionary RTCRTPStreamStats : RTCStats 
 
 dictionary RTCInboundRTPStreamStats : RTCRTPStreamStats {
   unsigned long packetsReceived;
   unsigned long long bytesReceived;
   double jitter;
   unsigned long packetsLost;
   long mozAvSyncDelay;
   long mozJitterBufferDelay;
+  long mozRtt;
 };
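
With mozRtt added, an inbound RTP entry in a stats report also carries a round-trip time estimate alongside the existing jitter and loss counters. A hypothetical entry shaped like the dictionary above (all values are made up):

    var inboundRtpStats = {
      packetsReceived: 1200,
      bytesReceived: 150000,
      jitter: 0.004,
      packetsLost: 3,
      mozAvSyncDelay: 12,
      mozJitterBufferDelay: 40,
      mozRtt: 85            // new field introduced by this patch
    };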
 
 dictionary RTCOutboundRTPStreamStats : RTCRTPStreamStats {
   unsigned long packetsSent;
   unsigned long long bytesSent;
 };
 
 dictionary RTCMediaStreamTrackStats : RTCStats {
@@ -53,17 +54,16 @@ dictionary RTCMediaStreamTrackStats : RT
   sequence<DOMString> ssrcIds;
   unsigned long audioLevel;       // Only for audio, the rest are only for video
   unsigned long frameWidth;
   unsigned long frameHeight;
   double framesPerSecond;         // The nominal FPS value
   unsigned long framesSent;
   unsigned long framesReceived;   // Only for remoteSource=true
   unsigned long framesDecoded;
-  unsigned long first;
 };
 
 dictionary RTCMediaStreamStats : RTCStats {
   DOMString streamIdentifier;     // stream.id property
   sequence<DOMString> trackIds;   // Note: stats object ids, not track.id
 };
 
 dictionary RTCTransportStats: RTCStats {
--- a/editor/libeditor/base/nsEditorEventListener.cpp
+++ b/editor/libeditor/base/nsEditorEventListener.cpp
@@ -34,80 +34,64 @@
 #include "nsIDOMRange.h"                // for nsIDOMRange
 #include "nsIDocument.h"                // for nsIDocument
 #include "nsIEditor.h"                  // for nsEditor::GetSelection, etc
 #include "nsIEditorIMESupport.h"
 #include "nsIEditorMailSupport.h"       // for nsIEditorMailSupport
 #include "nsIFocusManager.h"            // for nsIFocusManager
 #include "nsIFormControl.h"             // for nsIFormControl, etc
 #include "nsIHTMLEditor.h"              // for nsIHTMLEditor
-#include "nsINativeKeyBindings.h"       // for nsINativeKeyBindings
 #include "nsINode.h"                    // for nsINode, ::NODE_IS_EDITABLE, etc
 #include "nsIPlaintextEditor.h"         // for nsIPlaintextEditor, etc
 #include "nsIPresShell.h"               // for nsIPresShell
 #include "nsISelection.h"               // for nsISelection
 #include "nsISelectionController.h"     // for nsISelectionController, etc
 #include "nsISelectionPrivate.h"        // for nsISelectionPrivate
 #include "nsITransferable.h"            // for kFileMime, kHTMLMime, etc
+#include "nsIWidget.h"                  // for nsIWidget
 #include "nsLiteralString.h"            // for NS_LITERAL_STRING
 #include "nsPIWindowRoot.h"             // for nsPIWindowRoot
 #include "nsServiceManagerUtils.h"      // for do_GetService
 #include "nsString.h"                   // for nsAutoString
 #ifdef HANDLE_NATIVE_TEXT_DIRECTION_SWITCH
 #include "nsContentUtils.h"             // for nsContentUtils, etc
 #include "nsIBidiKeyboard.h"            // for nsIBidiKeyboard
 #endif
 
 class nsPresContext;
 
 using namespace mozilla;
 using namespace mozilla::dom;
 
-static nsINativeKeyBindings *sNativeEditorBindings = nullptr;
-
-static nsINativeKeyBindings*
-GetEditorKeyBindings()
-{
-  static bool noBindings = false;
-  if (!sNativeEditorBindings && !noBindings) {
-    CallGetService(NS_NATIVEKEYBINDINGS_CONTRACTID_PREFIX "editor",
-                   &sNativeEditorBindings);
-
-    if (!sNativeEditorBindings) {
-      noBindings = true;
-    }
-  }
-
-  return sNativeEditorBindings;
-}
-
 static void
-DoCommandCallback(const char *aCommand, void *aData)
+DoCommandCallback(Command aCommand, void* aData)
 {
   nsIDocument* doc = static_cast<nsIDocument*>(aData);
   nsPIDOMWindow* win = doc->GetWindow();
   if (!win) {
     return;
   }
   nsCOMPtr<nsPIWindowRoot> root = win->GetTopWindowRoot();
   if (!root) {
     return;
   }
 
+  const char* commandStr = WidgetKeyboardEvent::GetCommandStr(aCommand);
+
   nsCOMPtr<nsIController> controller;
-  root->GetControllerForCommand(aCommand, getter_AddRefs(controller));
+  root->GetControllerForCommand(commandStr, getter_AddRefs(controller));
   if (!controller) {
     return;
   }
 
   bool commandEnabled;
-  nsresult rv = controller->IsCommandEnabled(aCommand, &commandEnabled);
+  nsresult rv = controller->IsCommandEnabled(commandStr, &commandEnabled);
   NS_ENSURE_SUCCESS_VOID(rv);
   if (commandEnabled) {
-    controller->DoCommand(aCommand);
+    controller->DoCommand(commandStr);
   }
 }
 
 nsEditorEventListener::nsEditorEventListener() :
   mEditor(nullptr), mCommitText(false),
   mInTransaction(false)
 #ifdef HANDLE_NATIVE_TEXT_DIRECTION_SWITCH
   , mHaveBidiKeyboards(false)
@@ -120,22 +104,16 @@ nsEditorEventListener::nsEditorEventList
 nsEditorEventListener::~nsEditorEventListener() 
 {
   if (mEditor) {
     NS_WARNING("We're not uninstalled");
     Disconnect();
   }
 }
 
-/* static */ void
-nsEditorEventListener::ShutDown()
-{
-  NS_IF_RELEASE(sNativeEditorBindings);
-}
-
 nsresult
 nsEditorEventListener::Connect(nsEditor* aEditor)
 {
   NS_ENSURE_ARG(aEditor);
 
 #ifdef HANDLE_NATIVE_TEXT_DIRECTION_SWITCH
   nsIBidiKeyboard* bidiKeyboard = nsContentUtils::GetBidiKeyboard();
   if (bidiKeyboard) {
@@ -521,29 +499,35 @@ nsEditorEventListener::KeyPress(nsIDOMEv
   nsresult rv = mEditor->HandleKeyPressEvent(keyEvent);
   NS_ENSURE_SUCCESS(rv, rv);
 
   aKeyEvent->GetDefaultPrevented(&defaultPrevented);
   if (defaultPrevented) {
     return NS_OK;
   }
 
-  if (GetEditorKeyBindings() && ShouldHandleNativeKeyBindings(aKeyEvent)) {
+  if (ShouldHandleNativeKeyBindings(aKeyEvent)) {
     // Now, ask the native key bindings to handle the event.
-    // XXX Note that we're not passing the keydown/keyup events to the native
-    // key bindings, which should be OK since those events are only handled on
-    // Windows for now, where we don't have native key bindings.
     WidgetKeyboardEvent* keyEvent =
       aKeyEvent->GetInternalNSEvent()->AsKeyboardEvent();
     MOZ_ASSERT(keyEvent,
                "DOM key event's internal event must be WidgetKeyboardEvent");
+    nsIWidget* widget = keyEvent->widget;
+    // If the event is created by chrome script, the widget is always nullptr.
+    if (!widget) {
+      nsCOMPtr<nsIPresShell> ps = GetPresShell();
+      nsPresContext* pc = ps ? ps->GetPresContext() : nullptr;
+      widget = pc ? pc->GetNearestWidget() : nullptr;
+      NS_ENSURE_TRUE(widget, NS_OK);
+    }
+
     nsCOMPtr<nsIDocument> doc = mEditor->GetDocument();
-    bool handled = sNativeEditorBindings->KeyPress(*keyEvent,
-                                                   DoCommandCallback,
-                                                   doc);
+    bool handled = widget->ExecuteNativeKeyBinding(
+                             nsIWidget::NativeKeyBindingsForRichTextEditor,
+                             *keyEvent, DoCommandCallback, doc);
     if (handled) {
       aKeyEvent->PreventDefault();
     }
   }
 
   return NS_OK;
 }
 
--- a/editor/libeditor/base/nsEditorEventListener.h
+++ b/editor/libeditor/base/nsEditorEventListener.h
@@ -54,18 +54,16 @@ public:
   NS_IMETHOD MouseDown(nsIDOMEvent* aMouseEvent);
   NS_IMETHOD MouseUp(nsIDOMEvent* aMouseEvent) { return NS_OK; }
   NS_IMETHOD MouseClick(nsIDOMEvent* aMouseEvent);
   NS_IMETHOD Focus(nsIDOMEvent* aEvent);
   NS_IMETHOD Blur(nsIDOMEvent* aEvent);
 
   void SpellCheckIfNeeded();
 
-  static NS_HIDDEN_(void) ShutDown();
-
 protected:
   nsresult InstallToEditor();
   void UninstallFromEditor();
 
   bool CanDrop(nsIDOMDragEvent* aEvent);
   nsresult DragEnter(nsIDOMDragEvent* aDragEvent);
   nsresult DragOver(nsIDOMDragEvent* aDragEvent);
   nsresult DragExit(nsIDOMDragEvent* aDragEvent);
--- a/gfx/2d/Logging.h
+++ b/gfx/2d/Logging.h
@@ -105,20 +105,22 @@ public:
     mMessage.str("");
     mMessage.clear();
   }
 
   Log &operator <<(char aChar) { mMessage << aChar; return *this; }
   Log &operator <<(const std::string &aLogText) { mMessage << aLogText; return *this; }
   Log &operator <<(const char aStr[]) { mMessage << static_cast<const char*>(aStr); return *this; }
   Log &operator <<(bool aBool) { mMessage << (aBool ? "true" : "false"); return *this; }
-  Log &operator <<(int32_t aInt) { mMessage << aInt; return *this; }
-  Log &operator <<(uint32_t aInt) { mMessage << aInt; return *this; }
-  Log &operator <<(int64_t aLong) { mMessage << aLong; return *this; }
-  Log &operator <<(uint64_t aLong) { mMessage << aLong; return *this; }
+  Log &operator <<(int aInt) { mMessage << aInt; return *this; }
+  Log &operator <<(unsigned int aInt) { mMessage << aInt; return *this; }
+  Log &operator <<(long aLong) { mMessage << aLong; return *this; }
+  Log &operator <<(unsigned long aLong) { mMessage << aLong; return *this; }
+  Log &operator <<(long long aLong) { mMessage << aLong; return *this; }
+  Log &operator <<(unsigned long long aLong) { mMessage << aLong; return *this; }
   Log &operator <<(Float aFloat) { mMessage << aFloat; return *this; }
   Log &operator <<(double aDouble) { mMessage << aDouble; return *this; }
   template <typename T, typename Sub>
   Log &operator <<(const BasePoint<T, Sub>& aPoint)
     { mMessage << "Point(" << aPoint.x << "," << aPoint.y << ")"; return *this; }
   template <typename T, typename Sub>
   Log &operator <<(const BaseSize<T, Sub>& aSize)
     { mMessage << "Size(" << aSize.width << "," << aSize.height << ")"; return *this; }
--- a/gfx/layers/client/SimpleTiledContentClient.cpp
+++ b/gfx/layers/client/SimpleTiledContentClient.cpp
@@ -198,17 +198,17 @@ SimpleTiledLayerBuffer::GetSurfaceDescri
   InfallibleTArray<TileDescriptor> tiles;
 
   for (size_t i = 0; i < mRetainedTiles.Length(); i++) {
     tiles.AppendElement(mRetainedTiles[i].GetTileDescriptor());
   }
 
   return SurfaceDescriptorTiles(mValidRegion, mPaintedRegion,
                                 tiles, mRetainedWidth, mRetainedHeight,
-                                mResolution);
+                                mResolution, mFrameResolution.scale);
 }
 
 bool
 SimpleTiledLayerBuffer::HasFormatChanged() const
 {
   return mThebesLayer->CanUseOpaqueSurface() != mLastPaintOpaque;
 }
 
--- a/gfx/layers/client/TiledContentClient.cpp
+++ b/gfx/layers/client/TiledContentClient.cpp
@@ -603,17 +603,17 @@ ClientTiledLayerBuffer::GetSurfaceDescri
       tileDesc = PlaceholderTileDescriptor();
     } else {
       tileDesc = mRetainedTiles[i].GetTileDescriptor();
     }
     tiles.AppendElement(tileDesc);
   }
   return SurfaceDescriptorTiles(mValidRegion, mPaintedRegion,
                                 tiles, mRetainedWidth, mRetainedHeight,
-                                mResolution);
+                                mResolution, mFrameResolution.scale);
 }
 
 void
 ClientTiledLayerBuffer::PaintThebes(const nsIntRegion& aNewValidRegion,
                                    const nsIntRegion& aPaintRegion,
                                    LayerManager::DrawThebesLayerCallback aCallback,
                                    void* aCallbackData)
 {
--- a/gfx/layers/composite/TiledContentHost.cpp
+++ b/gfx/layers/composite/TiledContentHost.cpp
@@ -29,25 +29,25 @@ TiledLayerBufferComposite::TiledLayerBuf
   : mFrameResolution(1.0)
   , mHasDoubleBufferedTiles(false)
   , mUninitialized(true)
 {}
 
 TiledLayerBufferComposite::TiledLayerBufferComposite(ISurfaceAllocator* aAllocator,
                                                      const SurfaceDescriptorTiles& aDescriptor,
                                                      const nsIntRegion& aOldPaintedRegion)
-  : mFrameResolution(1.0)
 {
   mUninitialized = false;
   mHasDoubleBufferedTiles = false;
   mValidRegion = aDescriptor.validRegion();
   mPaintedRegion = aDescriptor.paintedRegion();
   mRetainedWidth = aDescriptor.retainedWidth();
   mRetainedHeight = aDescriptor.retainedHeight();
   mResolution = aDescriptor.resolution();
+  mFrameResolution = CSSToScreenScale(aDescriptor.frameResolution());
 
   // Combine any valid content that wasn't already uploaded
   nsIntRegion oldPaintedRegion(aOldPaintedRegion);
   oldPaintedRegion.And(oldPaintedRegion, mValidRegion);
   mPaintedRegion.Or(mPaintedRegion, oldPaintedRegion);
 
   const InfallibleTArray<TileDescriptor>& tiles = aDescriptor.tiles();
   for(size_t i = 0; i < tiles.Length(); i++) {
--- a/gfx/layers/ipc/Axis.cpp
+++ b/gfx/layers/ipc/Axis.cpp
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "Axis.h"
 #include <math.h>                       // for fabsf, pow, powf
 #include <algorithm>                    // for max
 #include "AsyncPanZoomController.h"     // for AsyncPanZoomController
+#include "mozilla/layers/APZCTreeManager.h" // for APZCTreeManager
 #include "FrameMetrics.h"               // for FrameMetrics
 #include "mozilla/Attributes.h"         // for MOZ_FINAL
 #include "mozilla/Preferences.h"        // for Preferences
 #include "mozilla/gfx/Rect.h"           // for RoundedIn
 #include "mozilla/mozalloc.h"           // for operator new
 #include "nsMathUtils.h"                // for NS_lround
 #include "nsThreadUtils.h"              // for NS_DispatchToMainThread, etc
 #include "nscore.h"                     // for NS_IMETHOD
@@ -60,36 +61,36 @@ namespace layers {
  * Maximum size of velocity queue. The queue contains last N velocity records.
  * On touch end we calculate the average velocity in order to compensate
  * for touch/mouse driver misbehaviour.
  *
  * The default value is 5, set in gfxPrefs.h
  */
 
 /**
- * "apz.max_velocity_pixels_per_ms"
+ * "apz.max_velocity_inches_per_ms"
  *
- * Maximum velocity in pixels per millisecond.  Velocity will be capped at this
+ * Maximum velocity in inches per millisecond.  Velocity will be capped at this
  * value if a faster fling occurs.  Negative values indicate unlimited velocity.
  *
  * The default value is -1.0f, set in gfxPrefs.h
  */
 
 Axis::Axis(AsyncPanZoomController* aAsyncPanZoomController)
   : mPos(0),
     mVelocity(0.0f),
     mAxisLocked(false),
     mAsyncPanZoomController(aAsyncPanZoomController)
 {
 }
 
 void Axis::UpdateWithTouchAtDevicePoint(int32_t aPos, const TimeDuration& aTimeDelta) {
   float newVelocity = mAxisLocked ? 0 : (mPos - aPos) / aTimeDelta.ToMilliseconds();
   if (gfxPrefs::APZMaxVelocity() > 0.0f) {
-    newVelocity = std::min(newVelocity, gfxPrefs::APZMaxVelocity());
+    newVelocity = std::min(newVelocity, gfxPrefs::APZMaxVelocity() * APZCTreeManager::GetDPI());
   }
 
   mVelocity = newVelocity;
   mPos = aPos;
 
   // Limit queue size based on pref
   mVelocityQueue.AppendElement(mVelocity);
   if (mVelocityQueue.Length() > gfxPrefs::APZMaxVelocityQueueSize()) {
--- a/gfx/layers/ipc/LayersMessages.ipdlh
+++ b/gfx/layers/ipc/LayersMessages.ipdlh
@@ -281,16 +281,17 @@ union TileDescriptor {
 
 struct SurfaceDescriptorTiles {
   nsIntRegion validRegion;
   nsIntRegion paintedRegion;
   TileDescriptor[] tiles;
   int         retainedWidth;
   int         retainedHeight;
   float       resolution;
+  float       frameResolution;
 };
 
 struct OpUseTiledLayerBuffer {
   PCompositable compositable;
   SurfaceDescriptorTiles tileLayerDescriptor;
 };
 
 struct OpCreatedTexture {
--- a/gfx/thebes/gfxFT2Utils.cpp
+++ b/gfx/thebes/gfxFT2Utils.cpp
@@ -48,37 +48,39 @@ gfxFT2LockedFace::GetMetrics(gfxFont::Me
                              uint32_t* aSpaceGlyph)
 {
     NS_PRECONDITION(aMetrics != nullptr, "aMetrics must not be NULL");
     NS_PRECONDITION(aSpaceGlyph != nullptr, "aSpaceGlyph must not be NULL");
 
     if (MOZ_UNLIKELY(!mFace)) {
         // No face.  This unfortunate situation might happen if the font
         // file is (re)moved at the wrong time.
-        aMetrics->emHeight = mGfxFont->GetStyle()->size;
-        aMetrics->emAscent = 0.8 * aMetrics->emHeight;
-        aMetrics->emDescent = 0.2 * aMetrics->emHeight;
-        aMetrics->maxAscent = aMetrics->emAscent;
-        aMetrics->maxDescent = aMetrics->maxDescent;
-        aMetrics->maxHeight = aMetrics->emHeight;
+        const gfxFloat emHeight = mGfxFont->GetStyle()->size;
+        aMetrics->emHeight = emHeight;
+        aMetrics->maxAscent = aMetrics->emAscent = 0.8 * emHeight;
+        aMetrics->maxDescent = aMetrics->emDescent = 0.2 * emHeight;
+        aMetrics->maxHeight = emHeight;
         aMetrics->internalLeading = 0.0;
-        aMetrics->externalLeading = 0.2 * aMetrics->emHeight;
-        aSpaceGlyph = 0;
-        aMetrics->spaceWidth = 0.5 * aMetrics->emHeight;
-        aMetrics->maxAdvance = aMetrics->spaceWidth;
-        aMetrics->aveCharWidth = aMetrics->spaceWidth;
-        aMetrics->zeroOrAveCharWidth = aMetrics->spaceWidth;
-        aMetrics->xHeight = 0.5 * aMetrics->emHeight;
-        aMetrics->underlineSize = aMetrics->emHeight / 14.0;
-        aMetrics->underlineOffset = -aMetrics->underlineSize;
-        aMetrics->strikeoutOffset = 0.25 * aMetrics->emHeight;
-        aMetrics->strikeoutSize = aMetrics->underlineSize;
-        aMetrics->superscriptOffset = aMetrics->xHeight;
-        aMetrics->subscriptOffset = aMetrics->xHeight;
+        aMetrics->externalLeading = 0.2 * emHeight;
+        const gfxFloat spaceWidth = 0.5 * emHeight;
+        aMetrics->spaceWidth = spaceWidth;
+        aMetrics->maxAdvance = spaceWidth;
+        aMetrics->aveCharWidth = spaceWidth;
+        aMetrics->zeroOrAveCharWidth = spaceWidth;
+        const gfxFloat xHeight = 0.5 * emHeight;
+        aMetrics->xHeight = xHeight;
+        aMetrics->superscriptOffset = xHeight;
+        aMetrics->subscriptOffset = xHeight;
+        const gfxFloat underlineSize = emHeight / 14.0;
+        aMetrics->underlineSize = underlineSize;
+        aMetrics->underlineOffset = -underlineSize;
+        aMetrics->strikeoutOffset = 0.25 * emHeight;
+        aMetrics->strikeoutSize = underlineSize;
 
+        *aSpaceGlyph = 0;
         return;
     }
 
     const FT_Size_Metrics& ftMetrics = mFace->size->metrics;
 
     gfxFloat emHeight;
     // Scale for vertical design metric conversion: pixels per design unit.
     // If this remains at 0.0, we can't use metrics from OS/2 etc.
--- a/gfx/thebes/gfxPrefs.h
+++ b/gfx/thebes/gfxPrefs.h
@@ -101,17 +101,17 @@ private:
 
   // This is where DECL_GFX_PREF for each of the preferences should go.
   // We will keep these in an alphabetical order to make it easier to see if
   // a method accessing a pref already exists. Just add yours in the list.
 
   DECL_GFX_PREF(Once, "apz.fling_friction",                    APZFlingFriction, float, 0.002f);
   DECL_GFX_PREF(Once, "apz.fling_stopped_threshold",           APZFlingStoppedThreshold, float, 0.01f);
   DECL_GFX_PREF(Once, "apz.max_event_acceleration",            APZMaxEventAcceleration, float, 999.0f);
-  DECL_GFX_PREF(Once, "apz.max_velocity_pixels_per_ms",        APZMaxVelocity, float, -1.0f);
+  DECL_GFX_PREF(Once, "apz.max_velocity_inches_per_ms",        APZMaxVelocity, float, -1.0f);
   DECL_GFX_PREF(Once, "apz.max_velocity_queue_size",           APZMaxVelocityQueueSize, uint32_t, 5);
 
   DECL_GFX_PREF(Once, "gfx.android.rgb16.force",               AndroidRGB16Force, bool, false);
 #if defined(ANDROID)
   DECL_GFX_PREF(Once, "gfx.apitrace.enabled",                  UseApitrace, bool, false);
 #endif
   DECL_GFX_PREF(Live, "gfx.canvas.azure.accelerated",          CanvasAzureAccelerated, bool, false);
   DECL_GFX_PREF(Once, "gfx.canvas.skiagl.dynamic-cache",       CanvasSkiaGLDynamicCache, bool, false);
--- a/js/public/MemoryMetrics.h
+++ b/js/public/MemoryMetrics.h
@@ -118,17 +118,16 @@ namespace JS {
 // Data for tracking memory usage of things hanging off objects.
 struct ObjectsExtraSizes
 {
 #define FOR_EACH_SIZE(macro) \
     macro(Objects, NotLiveGCThing, mallocHeapSlots) \
     macro(Objects, NotLiveGCThing, mallocHeapElementsNonAsmJS) \
     macro(Objects, NotLiveGCThing, mallocHeapElementsAsmJS) \
     macro(Objects, NotLiveGCThing, nonHeapElementsAsmJS) \
-    macro(Objects, NotLiveGCThing, nonHeapElementsMapped) \
     macro(Objects, NotLiveGCThing, nonHeapCodeAsmJS) \
     macro(Objects, NotLiveGCThing, mallocHeapAsmJSModuleData) \
     macro(Objects, NotLiveGCThing, mallocHeapArgumentsData) \
     macro(Objects, NotLiveGCThing, mallocHeapRegExpStatics) \
     macro(Objects, NotLiveGCThing, mallocHeapPropertyIteratorData) \
     macro(Objects, NotLiveGCThing, mallocHeapCtypesData)
 
     ObjectsExtraSizes()
@@ -401,16 +400,17 @@ struct ZoneStats
     macro(Other,   NotLiveGCThing, gcHeapArenaAdmin) \
     macro(Other,   NotLiveGCThing, unusedGCThings) \
     macro(Other,   IsLiveGCThing,  lazyScriptsGCHeap) \
     macro(Other,   NotLiveGCThing, lazyScriptsMallocHeap) \
     macro(Other,   IsLiveGCThing,  jitCodesGCHeap) \
     macro(Other,   IsLiveGCThing,  typeObjectsGCHeap) \
     macro(Other,   NotLiveGCThing, typeObjectsMallocHeap) \
     macro(Other,   NotLiveGCThing, typePool) \
+    macro(Other,   NotLiveGCThing, baselineStubsOptimized) \
 
     ZoneStats()
       : FOR_EACH_SIZE(ZERO_SIZE)
         stringInfo(),
         extra(),
         allStrings(nullptr),
         notableStrings(),
         isTotals(true)
@@ -496,17 +496,16 @@ struct CompartmentStats
     macro(Other,   NotLiveGCThing, shapesMallocHeapTreeTables) \
     macro(Other,   NotLiveGCThing, shapesMallocHeapDictTables) \
     macro(Other,   NotLiveGCThing, shapesMallocHeapTreeShapeKids) \
     macro(Other,   NotLiveGCThing, shapesMallocHeapCompartmentTables) \
     macro(Other,   IsLiveGCThing,  scriptsGCHeap) \
     macro(Other,   NotLiveGCThing, scriptsMallocHeapData) \
     macro(Other,   NotLiveGCThing, baselineData) \
     macro(Other,   NotLiveGCThing, baselineStubsFallback) \
-    macro(Other,   NotLiveGCThing, baselineStubsOptimized) \
     macro(Other,   NotLiveGCThing, ionData) \
     macro(Other,   NotLiveGCThing, typeInferenceTypeScripts) \
     macro(Other,   NotLiveGCThing, typeInferenceAllocationSiteTables) \
     macro(Other,   NotLiveGCThing, typeInferenceArrayTypeTables) \
     macro(Other,   NotLiveGCThing, typeInferenceObjectTypeTables) \
     macro(Other,   NotLiveGCThing, compartmentObject) \
     macro(Other,   NotLiveGCThing, crossCompartmentWrappersTable) \
     macro(Other,   NotLiveGCThing, regexpCompartment) \
--- a/js/src/builtin/TypedObject.cpp
+++ b/js/src/builtin/TypedObject.cpp
@@ -728,16 +728,25 @@ UnsizedArrayTypeDescr::dimension(JSConte
                                   unsizedTypeDescrValue, nullptr, nullptr,
                                   JSPROP_READONLY | JSPROP_PERMANENT))
         return nullptr;
 
     args.rval().setObject(*obj);
     return true;
 }
 
+bool
+js::IsTypedObjectArray(JSObject &obj)
+{
+    if (!obj.is<TypedObject>())
+        return false;
+    TypeDescr& d = obj.as<TypedObject>().typeDescr();
+    return d.is<SizedArrayTypeDescr>() || d.is<UnsizedArrayTypeDescr>();
+}
+
 /*********************************
  * StructType class
  */
 
 const Class StructTypeDescr::class_ = {
     "StructType",
     JSCLASS_HAS_RESERVED_SLOTS(JS_DESCR_SLOTS) |
     JSCLASS_HAS_PRIVATE, // used to store FieldList
--- a/js/src/builtin/TypedObject.h
+++ b/js/src/builtin/TypedObject.h
@@ -290,17 +290,18 @@ class X4TypeDescr : public SizedTypeDesc
     static bool call(JSContext *cx, unsigned argc, Value *vp);
     static bool is(const Value &v);
 };
 
 #define JS_FOR_EACH_X4_TYPE_REPR(macro_)                             \
     macro_(X4TypeDescr::TYPE_INT32, int32_t, int32)                  \
     macro_(X4TypeDescr::TYPE_FLOAT32, float, float32)
 
-bool IsTypedObjectClass(const Class *clasp); // Defined in TypedArrayObject.h
+bool IsTypedObjectClass(const Class *clasp); // Defined below
+bool IsTypedObjectArray(JSObject& obj);
 
 bool InitializeCommonTypeDescriptorProperties(JSContext *cx,
                                               HandleTypeDescr obj,
                                               HandleObject typeReprOwnerObj);
 
 /*
  * Properties and methods of the `ArrayType` meta type object. There
  * is no `class_` field because `ArrayType` is just a native
--- a/js/src/builtin/TypedObject.js
+++ b/js/src/builtin/TypedObject.js
@@ -1505,17 +1505,17 @@ function MapTypedParImplDepth1(inArray, 
                       ? RedirectPointer(outTypedObject, outOffset,
                                         outGrainTypeIsTransparent)
                       : undefined);
         const r = func(inVal, i, inArray, outVal);
         if (r !== undefined) {
           if (outGrainTypeIsComplex)
             SetTypedObjectValue(outGrainType, outArray, outOffset, r);
           else
-            outArray[i] = r;
+          UnsafePutElements(outArray, i, r);
         }
         inOffset += inGrainTypeSize;
         outOffset += outGrainTypeSize;
       }
 
       MARK_SLICE_DONE(slicesInfo, sliceId);
       if (warmup)
         return;
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -102,31 +102,16 @@ size_t
 gc::GetPageFaultCount()
 {
     PROCESS_MEMORY_COUNTERS pmc;
     if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
         return 0;
     return pmc.PageFaultCount;
 }
 
-void *
-gc::AllocateMappedObject(int fd, int *new_fd, size_t offset, size_t length,
-                         size_t alignment, size_t header)
-{
-    // TODO: to be implemented.
-    return nullptr;
-}
-
-// Deallocate mapped memory for object.
-void
-gc::DeallocateMappedObject(int fd, void *p, size_t length)
-{
-    // TODO: to be implemented.
-}
-
 #elif defined(SOLARIS)
 
 #include <sys/mman.h>
 #include <unistd.h>
 
 #ifndef MAP_NOSYNC
 # define MAP_NOSYNC 0
 #endif
@@ -175,38 +160,20 @@ gc::MarkPagesInUse(JSRuntime *rt, void *
 }
 
 size_t
 gc::GetPageFaultCount()
 {
     return 0;
 }
 
-void *
-gc::AllocateMappedObject(int fd, int *new_fd, size_t offset, size_t length,
-                         size_t alignment, size_t header)
-{
-    // TODO: to be implemented.
-    return nullptr;
-}
-
-// Deallocate mapped memory for object.
-void
-gc::DeallocateMappedObject(int fd, void *p, size_t length)
-{
-    // TODO: to be implemented.
-}
-
 #elif defined(XP_UNIX)
 
-#include <algorithm>
 #include <sys/mman.h>
 #include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/types.h>
 #include <unistd.h>
 
 void
 gc::InitMemorySubsystem(JSRuntime *rt)
 {
     rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
 }
 
@@ -313,95 +280,11 @@ gc::GetPageFaultCount()
 {
     struct rusage usage;
     int err = getrusage(RUSAGE_SELF, &usage);
     if (err)
         return 0;
     return usage.ru_majflt;
 }
 
-void *
-gc::AllocateMappedObject(int fd, int *new_fd, size_t offset, size_t length,
-                         size_t alignment, size_t header)
-{
-#define NEED_PAGE_ALIGNED 0
-    size_t pa_start; // Page aligned starting
-    size_t pa_end; // Page aligned ending
-    size_t pa_size; // Total page aligned size
-    size_t page_size = sysconf(_SC_PAGESIZE); // Page size
-    bool page_for_header = false; // Do we need an additional page for header?
-    struct stat st;
-    uint8_t *buf;
-
-    // Make sure file exists and do sanity check for offset and size.
-    if (fstat(fd, &st) < 0 || offset >= (size_t) st.st_size ||
-        length == 0 || length > (size_t) st.st_size - offset)
-        return nullptr;
-
-    // Check for minimal alignment requirement.
-#if NEED_PAGE_ALIGNED
-    alignment = std::max(alignment, page_size);
-#endif
-    if (offset & (alignment - 1))
-        return nullptr;
-
-    // Page aligned starting of the offset.
-    pa_start = offset & ~(page_size - 1);
-    // Calculate page aligned ending by adding one page to the page aligned
-    // starting of data end position(offset + length - 1).
-    pa_end = ((offset + length - 1) & ~(page_size - 1)) + page_size;
-    pa_size = pa_end - pa_start;
-
-    // Do we need one more page for header?
-    if (offset - pa_start < header) {
-        page_for_header = true;
-        pa_size += page_size;
-    }
-
-    // Ask for a continuous memory location.
-    buf = (uint8_t *) MapMemory(pa_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-    if (buf == MAP_FAILED)
-        return nullptr;
-
-    // Duplicate a new fd for mapping, so each cloned object uses a different fd.
-    *new_fd = dup(fd);
-
-    // If there's an additional page for header, don't map that page to file.
-    if (page_for_header) {
-        buf = (uint8_t *) mmap(buf + page_size, pa_size - page_size,
-                               PROT_READ | PROT_WRITE,
-                               MAP_PRIVATE | MAP_FIXED, *new_fd, pa_start);
-    } else {
-        buf = (uint8_t *) mmap(buf, pa_size, PROT_READ | PROT_WRITE,
-                               MAP_PRIVATE | MAP_FIXED, *new_fd, pa_start);
-    }
-    if (buf == MAP_FAILED) {
-        close(*new_fd);
-        return nullptr;
-    }
-
-    // Reset the data before target file, which we don't need to see.
-    memset(buf, 0, offset - pa_start);
-
-    // Reset the data after target file, which we don't need to see.
-    memset(buf + (offset - pa_start) + length, 0, pa_end - (offset + length));
-
-    return buf + (offset - pa_start) - header;
-}
-
-void
-gc::DeallocateMappedObject(int fd, void *p, size_t length)
-{
-    void *pa_start; // Page aligned starting
-    size_t page_size = sysconf(_SC_PAGESIZE); // Page size
-    size_t total_size; // Total allocated size
-
-    // The fd is not needed anymore.
-    close(fd);
-
-    pa_start = (void *)(uintptr_t(p) & ~(page_size - 1));
-    total_size = ((uintptr_t(p) + length) & ~(page_size - 1)) + page_size - uintptr_t(pa_start);
-    munmap(pa_start, total_size);
-}
-
 #else
 #error "Memory mapping functions are not defined for your OS."
 #endif
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -36,25 +36,12 @@ MarkPagesUnused(JSRuntime *rt, void *p, 
 // platforms.
 bool
 MarkPagesInUse(JSRuntime *rt, void *p, size_t size);
 
 // Returns #(hard faults) + #(soft faults)
 size_t
 GetPageFaultCount();
 
-// Allocate mapped memory for object from file descriptor, offset and length
-// of the file.
-// The new_fd is duplicated from original fd, for the purpose of cloned object.
-// The offset must be aligned according to alignment requirement.
-// An additional page might be allocated depending on offset and header size given.
-void *
-AllocateMappedObject(int fd, int *new_fd, size_t offset, size_t length,
-                     size_t alignment, size_t header);
-
-// Deallocate mapped memory of the object.
-void
-DeallocateMappedObject(int fd, void *p, size_t length);
-
 } // namespace gc
 } // namespace js
 
 #endif /* gc_Memory_h */
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -37,28 +37,35 @@ JS::Zone::Zone(JSRuntime *rt)
     usedByExclusiveThread(false),
     scheduledForDestruction(false),
     maybeAlive(true),
     gcMallocBytes(0),
     gcMallocGCTriggered(false),
     gcGrayRoots(),
     data(nullptr),
     types(this)
+#ifdef JS_ION
+    , jitZone_(nullptr)
+#endif
 {
     /* Ensure that there are no vtables to mess us up here. */
     JS_ASSERT(reinterpret_cast<JS::shadow::Zone *>(this) ==
               static_cast<JS::shadow::Zone *>(this));
 
     setGCMaxMallocBytes(rt->gcMaxMallocBytes * 0.9);
 }
 
 Zone::~Zone()
 {
     if (this == runtimeFromMainThread()->systemZone)
         runtimeFromMainThread()->systemZone = nullptr;
+
+#ifdef JS_ION
+    js_delete(jitZone_);
+#endif
 }
 
 bool
 Zone::init(JSContext *cx)
 {
     types.init(cx);
     return true;
 }
@@ -164,16 +171,19 @@ Zone::sweepBreakpoints(FreeOp *fop)
         }
     }
 }
 
 void
 Zone::discardJitCode(FreeOp *fop)
 {
 #ifdef JS_ION
+    if (!jitZone())
+        return;
+
     if (isPreservingCode()) {
         PurgeJITCaches(this);
     } else {
 
 # ifdef DEBUG
         /* Assert no baseline scripts are marked as active. */
         for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
@@ -200,30 +210,43 @@ Zone::discardJitCode(FreeOp *fop)
             /*
              * Use counts for scripts are reset on GC. After discarding code we
              * need to let it warm back up to get information such as which
              * opcodes are setting array holes or accessing getter properties.
              */
             script->resetUseCount();
         }
 
-        for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
-            jit::FinishDiscardJitCode(fop, comp);
+        jitZone()->optimizedStubSpace()->free();
     }
 #endif
 }
 
 uint64_t
 Zone::gcNumber()
 {
     // Zones in use by exclusive threads are not collected, and threads using
     // them cannot access the main runtime's gcNumber without racing.
     return usedByExclusiveThread ? 0 : runtimeFromMainThread()->gcNumber;
 }
 
+#ifdef JS_ION
+js::jit::JitZone *
+Zone::createJitZone(JSContext *cx)
+{
+    MOZ_ASSERT(!jitZone_);
+
+    if (!cx->runtime()->getJitRuntime(cx))
+        return nullptr;
+
+    jitZone_ = cx->new_<js::jit::JitZone>();
+    return jitZone_;
+}
+#endif
+
 JS::Zone *
 js::ZoneOfObject(const JSObject &obj)
 {
     return obj.zone();
 }
 
 JS::Zone *
 js::ZoneOfObjectFromAnyThread(const JSObject &obj)
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -13,16 +13,20 @@
 #include "jscntxt.h"
 #include "jsgc.h"
 #include "jsinfer.h"
 
 #include "gc/FindSCCs.h"
 
 namespace js {
 
+namespace jit {
+class JitZone;
+}
+
 /*
  * Encapsulates the data needed to perform allocation.  Typically there is
  * precisely one of these per zone (|cx->zone().allocator|).  However, in
  * parallel execution mode, there will be one per worker thread.
  */
 class Allocator
 {
     /*
@@ -275,17 +279,19 @@ struct Zone : public JS::shadow::Zone,
     Zone(JSRuntime *rt);
     ~Zone();
     bool init(JSContext *cx);
 
     void findOutgoingEdges(js::gc::ComponentFinder<JS::Zone> &finder);
 
     void discardJitCode(js::FreeOp *fop);
 
-    void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, size_t *typePool);
+    void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+                                size_t *typePool,
+                                size_t *baselineStubsOptimized);
 
     void setGCLastBytes(size_t lastBytes, js::JSGCInvocationKind gckind);
     void reduceGCTriggerBytes(size_t amount);
 
     void resetGCMallocBytes();
     void setGCMaxMallocBytes(size_t value);
     void updateMallocCounter(size_t nbytes) {
         /*
@@ -311,16 +317,29 @@ struct Zone : public JS::shadow::Zone,
     }
 
     js::types::TypeZone types;
 
     void sweep(js::FreeOp *fop, bool releaseTypes);
 
   private:
     void sweepBreakpoints(js::FreeOp *fop);
+
+#ifdef JS_ION
+    js::jit::JitZone *jitZone_;
+    js::jit::JitZone *createJitZone(JSContext *cx);
+
+  public:
+    js::jit::JitZone *getJitZone(JSContext *cx) {
+        return jitZone_ ? jitZone_ : createJitZone(cx);
+    }
+    js::jit::JitZone *jitZone() {
+        return jitZone_;
+    }
+#endif
 };
 
 } /* namespace JS */
 
 namespace js {
 
 /*
  * Using the atoms zone without holding the exclusive access lock is dangerous
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/bug953164.js
@@ -0,0 +1,20 @@
+function test(a) {
+    var total = 0;
+    for (var i=0; i<100; i++) {
+
+        var j = 1;
+        var b = a.a
+        if (b) {
+            j += b.test;
+        }
+        total += j;
+    }
+    print(total)
+}
+
+var a1 = {"a": {"test":1}};
+var a2 = {"a": undefined};
+test(a1)
+test(a2)
+test(a1)
+test(a2)
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/parallel/bug977853-convert-doubles.js
@@ -0,0 +1,63 @@
+// Bug 977853 -- Pared down version of script exhibiting negative
+// interaction with convert to doubles optimization. See bug for gory
+// details.
+
+if (!getBuildConfiguration().parallelJS)
+  quit();
+
+load(libdir + "parallelarray-helpers.js")
+
+var numIters = 5;
+var golden_output;
+
+function PJS_div4(v, s)
+{
+  return [ v[0]/s, v[1]/s, v[2]/s, v[3]/s ];
+}
+
+function PJS_normalized(v)
+{
+  var d = Math.sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
+  d = d > 0.0 ? d : 1.0;
+  var result = [ v[0]/d, v[1]/d, v[2]/d, 1.0 ];
+  return result;
+}
+
+// This is the elemental function passed to mapPar
+function PJS_displace(p)
+{
+  var position = [p[0], p[1], p[2], 1.0];
+  var normal = position;
+  var roughness = 0.025 / 0.35;
+  normal = PJS_normalized(PJS_div4(normal, roughness));
+  return null;
+}
+var NUM_VERTEX_COMPONENTS = 3;
+var initPos, nVertices;
+var userData = {
+  nVertices : 25, //2880,
+  initPos : [],
+};
+function setup() {
+  for(var k = 0; k < NUM_VERTEX_COMPONENTS*userData.nVertices; k++) {
+    userData.initPos[k] = k/1000;
+  }
+  nVertices	= userData.nVertices;
+  initPos		= new Array(nVertices);
+  for(var i=0, j=0; i<nVertices; i++, j+=NUM_VERTEX_COMPONENTS) {
+	initPos[i] = [userData.initPos[j],
+				  userData.initPos[j+1],
+				  userData.initPos[j+2]];
+  }
+}
+function SimulatePJS() {
+  var curPosAndNor;
+
+  // Measure Parallel Execution
+  assertParallelExecSucceeds(
+    function(m) { return initPos.mapPar(PJS_displace, m); },
+    function() { });
+}
+var start_time, elapsed_parallel = 0, elapsed_sequential = 0;
+setup();
+SimulatePJS();
--- a/js/src/jit-test/tests/parallel/closure-nested-branch.js
+++ b/js/src/jit-test/tests/parallel/closure-nested-branch.js
@@ -1,22 +1,13 @@
 load(libdir + "parallelarray-helpers.js");
 
 function testClosureCreationAndInvocation() {
   var a = range(0, 64);
   function makeaddv(v) {
-    var u = 1;
-    var t = 2;
-    var s = 3;
-    var r = 4;
-    var q = 5;
-    var p = 6;
-    var o = 7;
-    var n = 8;
-    var m = 9;
     var l = 10;
     var k = 11;
     var j = 12;
     var i = 13;
     var h = 14;
     var g = 15;
     var f = 16;
     var e = 17;
@@ -29,21 +20,16 @@ function testClosureCreationAndInvocatio
             : function (x) {
               switch (x) {
               case 0: return a; case 1: return b;
               case 2: return c; case 3: return d;
               case 4: return e; case 5: return f;
               case 6: return g; case 7: return h;
               case 8: return i; case 9: return j;
               case 10: return k; case 11: return l;
-              case 12: return m; case 13: return n;
-              case 14: return o; case 15: return p;
-              case 16: return q; case 17: return r;
-              case 18: return s; case 19: return t;
-              case 20: return u;
               }
             });
   }
   var m;
   for (var i in MODES) m = a.mapPar(makeaddv, MODES[i]);
   assertEq(m[21](1), 20); // v == 21; x == 1 ==> inner function returns b == 20
 
   var n = a.mapPar(function (v) { return function (x) { return v; }});
--- a/js/src/jit-test/tests/parallel/closure-nested-compute.js
+++ b/js/src/jit-test/tests/parallel/closure-nested-compute.js
@@ -1,22 +1,13 @@
 load(libdir + "parallelarray-helpers.js");
 
 function testClosureCreationAndInvocation() {
   var a = range(0, 64);
   function makeaddv(v) {
-    var u = 1;
-    var t = 2;
-    var s = 3;
-    var r = 4;
-    var q = 5;
-    var p = 6;
-    var o = 7;
-    var n = 8;
-    var m = 9;
     var l = 10;
     var k = 11;
     var j = 12;
     var i = 13;
     var h = 14;
     var g = 15;
     var f = 16;
     var e = 17;
@@ -27,21 +18,16 @@ function testClosureCreationAndInvocatio
     return function (x) {
       switch (x) {
       case 0: return a; case 1: return b;
       case 2: return c; case 3: return d;
       case 4: return e; case 5: return f;
       case 6: return g; case 7: return h;
       case 8: return i; case 9: return j;
       case 10: return k; case 11: return l;
-      case 12: return m; case 13: return n;
-      case 14: return o; case 15: return p;
-      case 16: return q; case 17: return r;
-      case 18: return s; case 19: return t;
-      case 20: return u;
       }
     };
   };
   for (var i in MODES) {
     var m = a.mapPar(makeaddv, MODES[i]);
     assertEq(m[21](1), 20); // v == 21; x == 1 ==> inner function returns b == 20
   }
 }
--- a/js/src/jit/AsmJSLink.cpp
+++ b/js/src/jit/AsmJSLink.cpp
@@ -458,34 +458,18 @@ HandleDynamicLinkFailure(JSContext *cx, 
     options.setOriginPrincipals(module.scriptSource()->originPrincipals())
            .setCompileAndGo(false)
            .setNoScriptRval(false);
 
     if (!frontend::CompileFunctionBody(cx, &fun, options, formals, src->chars(), end - begin))
         return false;
 
     // Call the function we just recompiled.
-
-    unsigned argc = args.length();
-
-    InvokeArgs args2(cx);
-    if (!args2.init(argc))
-        return false;
-
-    args2.setCallee(ObjectValue(*fun));
-    args2.setThis(args.thisv());
-    for (unsigned i = 0; i < argc; i++)
-        args2[i].set(args[i]);
-
-    if (!Invoke(cx, args2))
-        return false;
-
-    args.rval().set(args2.rval());
-
-    return true;
+    args.setCallee(ObjectValue(*fun));
+    return Invoke(cx, args);
 }
 
 #ifdef MOZ_VTUNE
 static bool
 SendFunctionsToVTune(JSContext *cx, AsmJSModule &module)
 {
     uint8_t *base = module.codeBase();
 
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -527,17 +527,17 @@ InitFromBailout(JSContext *cx, HandleScr
     IonSpew(IonSpew_BaselineBailouts, "      FrameSize=%d", (int) frameSize);
     blFrame->setFrameSize(frameSize);
 
     uint32_t flags = 0;
 
     // If SPS Profiler is enabled, mark the frame as having pushed an SPS entry.
     // This may be wrong for the last frame of ArgumentCheck bailout, but
     // that will be fixed later.
-    if (cx->runtime()->spsProfiler.enabled() && ionScript->hasSPSInstrumentation()) {
+    if (ionScript->hasSPSInstrumentation()) {
         IonSpew(IonSpew_BaselineBailouts, "      Setting SPS flag on frame!");
         flags |= BaselineFrame::HAS_PUSHED_SPS_FRAME;
     }
 
     // Initialize BaselineFrame's scopeChain and argsObj
     JSObject *scopeChain = nullptr;
     Value returnValue;
     ArgumentsObject *argsObj = nullptr;
--- a/js/src/jit/BaselineIC.h
+++ b/js/src/jit/BaselineIC.h
@@ -1082,17 +1082,17 @@ class ICStubCompiler
 #endif
 
   public:
     virtual ICStub *getStub(ICStubSpace *space) = 0;
 
     ICStubSpace *getStubSpace(JSScript *script) {
         if (ICStub::CanMakeCalls(kind))
             return script->baselineScript()->fallbackStubSpace();
-        return script->compartment()->jitCompartment()->optimizedStubSpace();
+        return script->zone()->jitZone()->optimizedStubSpace();
     }
 };
 
 // Base class for stub compilers that can generate multiple stubcodes.
 // These compilers need access to the JSOp they are compiling for.
 class ICMultiStubCompiler : public ICStubCompiler
 {
   protected:
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -967,18 +967,17 @@ CodeGenerator::visitLambda(LLambda *lir)
 
     OutOfLineCode *ool = oolCallVM(LambdaInfo, lir, (ArgList(), ImmGCPtr(info.fun), scopeChain),
                                    StoreRegisterTo(output));
     if (!ool)
         return false;
 
     JS_ASSERT(!info.singletonType);
 
-    masm.newGCThing(output, tempReg, info.fun, ool->entry(), gc::DefaultHeap);
-    masm.initGCThing(output, tempReg, info.fun);
+    masm.createGCObject(output, tempReg, info.fun, gc::DefaultHeap, ool->entry());
 
     emitLambdaInit(output, scopeChain, info);
 
     masm.bind(ool->rejoin());
     return true;
 }
 
 void
@@ -3332,39 +3331,16 @@ CodeGenerator::visitNewDerivedTypedObjec
     JS_ASSERT(gen->info().executionMode() == SequentialExecution);
 
     pushArg(ToRegister(lir->offset()));
     pushArg(ToRegister(lir->owner()));
     pushArg(ToRegister(lir->type()));
     return callVM(CreateDerivedTypedObjInfo, lir);
 }
 
-bool
-CodeGenerator::visitNewSlots(LNewSlots *lir)
-{
-    Register temp1 = ToRegister(lir->temp1());
-    Register temp2 = ToRegister(lir->temp2());
-    Register temp3 = ToRegister(lir->temp3());
-    Register output = ToRegister(lir->output());
-
-    masm.mov(ImmPtr(GetIonContext()->runtime), temp1);
-    masm.mov(ImmWord(lir->mir()->nslots()), temp2);
-
-    masm.setupUnalignedABICall(2, temp3);
-    masm.passABIArg(temp1);
-    masm.passABIArg(temp2);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NewSlots));
-
-    masm.testPtr(output, output);
-    if (!bailoutIf(Assembler::Zero, lir->snapshot()))
-        return false;
-
-    return true;
-}
-
 bool CodeGenerator::visitAtan2D(LAtan2D *lir)
 {
     Register temp = ToRegister(lir->temp());
     FloatRegister y = ToFloatRegister(lir->y());
     FloatRegister x = ToFloatRegister(lir->x());
 
     masm.setupUnalignedABICall(2, temp);
     masm.passABIArg(y, MoveOp::DOUBLE);
@@ -3403,18 +3379,17 @@ CodeGenerator::visitNewArray(LNewArray *
 
     if (lir->mir()->shouldUseVM())
         return visitNewArrayCallVM(lir);
 
     OutOfLineNewArray *ool = new(alloc()) OutOfLineNewArray(lir);
     if (!addOutOfLineCode(ool))
         return false;
 
-    masm.newGCThing(objReg, tempReg, templateObject, ool->entry(), lir->mir()->initialHeap());
-    masm.initGCThing(objReg, tempReg, templateObject);
+    masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry());
 
     masm.bind(ool->rejoin());
     return true;
 }
 
 bool
 CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray *ool)
 {
@@ -3490,18 +3465,17 @@ CodeGenerator::visitNewObject(LNewObject
 
     if (lir->mir()->shouldUseVM())
         return visitNewObjectVMCall(lir);
 
     OutOfLineNewObject *ool = new(alloc()) OutOfLineNewObject(lir);
     if (!addOutOfLineCode(ool))
         return false;
 
-    masm.newGCThing(objReg, tempReg, templateObject, ool->entry(), lir->mir()->initialHeap());
-    masm.initGCThing(objReg, tempReg, templateObject);
+    masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry());
 
     masm.bind(ool->rejoin());
     return true;
 }
 
 bool
 CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject *ool)
 {
@@ -3526,91 +3500,64 @@ CodeGenerator::visitNewDeclEnvObject(LNe
     // If we have a template object, we can inline call object creation.
     OutOfLineCode *ool = oolCallVM(NewDeclEnvObjectInfo, lir,
                                    (ArgList(), ImmGCPtr(info.funMaybeLazy()),
                                     Imm32(gc::DefaultHeap)),
                                    StoreRegisterTo(objReg));
     if (!ool)
         return false;
 
-    masm.newGCThing(objReg, tempReg, templateObj, ool->entry(), gc::DefaultHeap);
-    masm.initGCThing(objReg, tempReg, templateObj);
+    masm.createGCObject(objReg, tempReg, templateObj, gc::DefaultHeap, ool->entry());
+
     masm.bind(ool->rejoin());
     return true;
 }
 
-typedef JSObject *(*NewCallObjectFn)(JSContext *, HandleScript, HandleShape,
-                                     HandleTypeObject, HeapSlot *);
+typedef JSObject *(*NewCallObjectFn)(JSContext *, HandleScript, HandleShape, HandleTypeObject);
 static const VMFunction NewCallObjectInfo =
     FunctionInfo<NewCallObjectFn>(NewCallObject);
 
 bool
 CodeGenerator::visitNewCallObject(LNewCallObject *lir)
 {
     Register objReg = ToRegister(lir->output());
     Register tempReg = ToRegister(lir->temp());
 
     JSObject *templateObj = lir->mir()->templateObject();
 
     // If we have a template object, we can inline call object creation.
-    OutOfLineCode *ool;
-    if (lir->slots()->isRegister()) {
-        ool = oolCallVM(NewCallObjectInfo, lir,
-                        (ArgList(), ImmGCPtr(lir->mir()->block()->info().script()),
-                                    ImmGCPtr(templateObj->lastProperty()),
-                                    ImmGCPtr(templateObj->hasSingletonType() ? nullptr : templateObj->type()),
-                                    ToRegister(lir->slots())),
-                        StoreRegisterTo(objReg));
-    } else {
-        ool = oolCallVM(NewCallObjectInfo, lir,
-                        (ArgList(), ImmGCPtr(lir->mir()->block()->info().script()),
-                                    ImmGCPtr(templateObj->lastProperty()),
-                                    ImmGCPtr(templateObj->hasSingletonType() ? nullptr : templateObj->type()),
-                                    ImmPtr(nullptr)),
-                        StoreRegisterTo(objReg));
-    }
+    OutOfLineCode *ool = oolCallVM(NewCallObjectInfo, lir,
+                                   (ArgList(), ImmGCPtr(lir->mir()->block()->info().script()),
+                                               ImmGCPtr(templateObj->lastProperty()),
+                                               ImmGCPtr(templateObj->hasSingletonType() ? nullptr : templateObj->type())),
+                                   StoreRegisterTo(objReg));
     if (!ool)
         return false;
 
     if (lir->mir()->needsSingletonType()) {
         // Objects can only be given singleton types in VM calls.
         masm.jump(ool->entry());
     } else {
-        masm.newGCThing(objReg, tempReg, templateObj, ool->entry(), gc::DefaultHeap);
-        masm.initGCThing(objReg, tempReg, templateObj);
-
-        if (lir->slots()->isRegister())
-            masm.storePtr(ToRegister(lir->slots()), Address(objReg, JSObject::offsetOfSlots()));
+        masm.createGCObject(objReg, tempReg, templateObj, gc::DefaultHeap, ool->entry());
     }
 
     masm.bind(ool->rejoin());
     return true;
 }
 
 bool
 CodeGenerator::visitNewCallObjectPar(LNewCallObjectPar *lir)
 {
     Register resultReg = ToRegister(lir->output());
     Register cxReg = ToRegister(lir->forkJoinContext());
     Register tempReg1 = ToRegister(lir->getTemp0());
     Register tempReg2 = ToRegister(lir->getTemp1());
     JSObject *templateObj = lir->mir()->templateObj();
 
     emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, templateObj);
-
-    // NB: !lir->slots()->isRegister() implies that there is no slots
-    // array at all, and the memory is already zeroed when copying
-    // from the template object
-
-    if (lir->slots()->isRegister()) {
-        Register slotsReg = ToRegister(lir->slots());
-        JS_ASSERT(slotsReg != resultReg);
-        masm.storePtr(slotsReg, Address(resultReg, JSObject::offsetOfSlots()));
-    }
-
     return true;
 }
 
 bool
 CodeGenerator::visitNewDenseArrayPar(LNewDenseArrayPar *lir)
 {
     Register cxReg = ToRegister(lir->forkJoinContext());
     Register lengthReg = ToRegister(lir->length());
@@ -3659,18 +3606,17 @@ CodeGenerator::visitNewStringObject(LNew
 
     StringObject *templateObj = lir->mir()->templateObj();
 
     OutOfLineCode *ool = oolCallVM(NewStringObjectInfo, lir, (ArgList(), input),
                                    StoreRegisterTo(output));
     if (!ool)
         return false;
 
-    masm.newGCThing(output, temp, templateObj, ool->entry(), gc::DefaultHeap);
-    masm.initGCThing(output, temp, templateObj);
+    masm.createGCObject(output, temp, templateObj, gc::DefaultHeap, ool->entry());
 
     masm.loadStringLength(input, temp);
 
     masm.storeValue(JSVAL_TYPE_STRING, input, Address(output, StringObject::offsetOfPrimitiveValue()));
     masm.storeValue(JSVAL_TYPE_INT32, temp, Address(output, StringObject::offsetOfLength()));
 
     masm.bind(ool->rejoin());
     return true;
@@ -3905,17 +3851,17 @@ CodeGenerator::visitCreateThisWithTempla
 
     OutOfLineCode *ool = oolCallVM(NewGCObjectInfo, lir,
                                    (ArgList(), Imm32(allocKind), Imm32(initialHeap)),
                                    StoreRegisterTo(objReg));
     if (!ool)
         return false;
 
     // Allocate. If the FreeList is empty, call to VM, which may GC.
-    masm.newGCThing(objReg, tempReg, templateObject, ool->entry(), lir->mir()->initialHeap());
+    masm.newGCThing(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry());
 
     // Initialize based on the templateObject.
     masm.bind(ool->rejoin());
     masm.initGCThing(objReg, tempReg, templateObject);
 
     return true;
 }
 
@@ -4974,16 +4920,80 @@ JitCompartment::generateStringConcatStub
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "StringConcatStub");
 #endif
 
     return code;
 }
 
+JitCode *
+JitRuntime::generateMallocStub(JSContext *cx)
+{
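+    // The stub calls MallocWrapper(runtime, nBytes); the byte count is passed
+    // in and the result returned in CallTempReg0.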
+    const Register regReturn = CallTempReg0;
+    const Register regNBytes = CallTempReg0;
+    const Register regRuntime = CallTempReg1;
+    const Register regTemp = CallTempReg1;
+
+    MacroAssembler masm(cx);
+
+    RegisterSet regs = RegisterSet::Volatile();
+    regs.takeUnchecked(regNBytes);
+    masm.PushRegsInMask(regs);
+
+    masm.setupUnalignedABICall(2, regTemp);
+    masm.movePtr(ImmPtr(cx->runtime()), regRuntime);
+    masm.passABIArg(regRuntime);
+    masm.passABIArg(regNBytes);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, MallocWrapper));
+    masm.storeCallResult(regReturn);
+
+    masm.PopRegsInMask(regs);
+    masm.ret();
+
+    Linker linker(masm);
+    JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+    writePerfSpewerJitCodeProfile(code, "MallocStub");
+#endif
+
+    return code;
+}
+
+JitCode *
+JitRuntime::generateFreeStub(JSContext *cx)
+{
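+    // The stub calls js_free on the pointer passed in CallTempReg0.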
+    const Register regSlots = CallTempReg0;
+    const Register regTemp = CallTempReg1;
+
+    MacroAssembler masm(cx);
+
+    RegisterSet regs = RegisterSet::Volatile();
+    regs.takeUnchecked(regSlots);
+    masm.PushRegsInMask(regs);
+
+    masm.setupUnalignedABICall(1, regTemp);
+    masm.passABIArg(regSlots);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js_free));
+
+    masm.PopRegsInMask(regs);
+
+    masm.ret();
+
+    Linker linker(masm);
+    JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+    writePerfSpewerJitCodeProfile(code, "FreeStub");
+#endif
+
+    return code;
+}
+
 typedef bool (*CharCodeAtFn)(JSContext *, HandleString, int32_t, uint32_t *);
 static const VMFunction CharCodeAtInfo = FunctionInfo<CharCodeAtFn>(jit::CharCodeAt);
 
 bool
 CodeGenerator::visitCharCodeAt(LCharCodeAt *lir)
 {
     Register str = ToRegister(lir->str());
     Register index = ToRegister(lir->index());
@@ -5627,19 +5637,17 @@ CodeGenerator::visitArrayConcat(LArrayCo
     masm.load32(Address(temp1, ObjectElements::offsetOfInitializedLength()), temp2);
     masm.branch32(Assembler::NotEqual, Address(temp1, ObjectElements::offsetOfLength()), temp2, &fail);
 
     masm.loadPtr(Address(rhs, JSObject::offsetOfElements()), temp1);
     masm.load32(Address(temp1, ObjectElements::offsetOfInitializedLength()), temp2);
     masm.branch32(Assembler::NotEqual, Address(temp1, ObjectElements::offsetOfLength()), temp2, &fail);
 
     // Try to allocate an object.
-    JSObject *templateObj = lir->mir()->templateObj();
-    masm.newGCThing(temp1, temp2, templateObj, &fail, lir->mir()->initialHeap());
-    masm.initGCThing(temp1, temp2, templateObj);
+    masm.createGCObject(temp1, temp2, lir->mir()->templateObj(), lir->mir()->initialHeap(), &fail);
     masm.jump(&call);
     {
         masm.bind(&fail);
         masm.movePtr(ImmPtr(nullptr), temp1);
     }
     masm.bind(&call);
 
     pushArg(temp1);
@@ -6000,18 +6008,17 @@ CodeGenerator::visitRest(LRest *lir)
     Register numActuals = ToRegister(lir->numActuals());
     Register temp0 = ToRegister(lir->getTemp(0));
     Register temp1 = ToRegister(lir->getTemp(1));
     Register temp2 = ToRegister(lir->getTemp(2));
     unsigned numFormals = lir->mir()->numFormals();
     JSObject *templateObject = lir->mir()->templateObject();
 
     Label joinAlloc, failAlloc;
-    masm.newGCThing(temp2, temp0, templateObject, &failAlloc, gc::DefaultHeap);
-    masm.initGCThing(temp2, temp0, templateObject);
+    masm.createGCObject(temp2, temp0, templateObject, gc::DefaultHeap, &failAlloc);
     masm.jump(&joinAlloc);
     {
         masm.bind(&failAlloc);
         masm.movePtr(ImmPtr(nullptr), temp2);
     }
     masm.bind(&joinAlloc);
 
     return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject);
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -126,17 +126,16 @@ class CodeGenerator : public CodeGenerat
     bool visitBail(LBail *lir);
     bool visitGetDynamicName(LGetDynamicName *lir);
     bool visitFilterArgumentsOrEvalS(LFilterArgumentsOrEvalS *lir);
     bool visitFilterArgumentsOrEvalV(LFilterArgumentsOrEvalV *lir);
     bool visitCallDirectEvalS(LCallDirectEvalS *lir);
     bool visitCallDirectEvalV(LCallDirectEvalV *lir);
     bool visitDoubleToInt32(LDoubleToInt32 *lir);
     bool visitFloat32ToInt32(LFloat32ToInt32 *lir);
-    bool visitNewSlots(LNewSlots *lir);
     bool visitNewArrayCallVM(LNewArray *lir);
     bool visitNewArray(LNewArray *lir);
     bool visitOutOfLineNewArray(OutOfLineNewArray *ool);
     bool visitNewObjectVMCall(LNewObject *lir);
     bool visitNewObject(LNewObject *lir);
     bool visitOutOfLineNewObject(OutOfLineNewObject *ool);
     bool visitNewDeclEnvObject(LNewDeclEnvObject *lir);
     bool visitNewCallObject(LNewCallObject *lir);
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -263,16 +263,26 @@ JitRuntime::initialize(JSContext *cx)
     if (!valuePreBarrier_)
         return false;
 
     IonSpew(IonSpew_Codegen, "# Emitting Pre Barrier for Shape");
     shapePreBarrier_ = generatePreBarrier(cx, MIRType_Shape);
     if (!shapePreBarrier_)
         return false;
 
+    IonSpew(IonSpew_Codegen, "# Emitting malloc stub");
+    mallocStub_ = generateMallocStub(cx);
+    if (!mallocStub_)
+        return false;
+
+    IonSpew(IonSpew_Codegen, "# Emitting free stub");
+    freeStub_ = generateFreeStub(cx);
+    if (!freeStub_)
+        return false;
+
     IonSpew(IonSpew_Codegen, "# Emitting VM function wrappers");
     for (VMFunction *fun = VMFunction::functions; fun; fun = fun->next) {
         if (!generateVMWrapper(cx, *fun))
             return false;
     }
 
     return true;
 }
@@ -450,19 +460,18 @@ jit::RequestInterruptForIonCode(JSRuntim
         // Nothing more needs to be done.
         break;
 
       default:
         MOZ_ASSUME_UNREACHABLE("Bad interrupt mode");
     }
 }
 
-JitCompartment::JitCompartment(JitRuntime *rt)
-  : rt(rt),
-    stubCodes_(nullptr),
+JitCompartment::JitCompartment()
+  : stubCodes_(nullptr),
     baselineCallReturnAddr_(nullptr),
     baselineGetPropReturnAddr_(nullptr),
     baselineSetPropReturnAddr_(nullptr),
     stringConcatStub_(nullptr),
     parallelStringConcatStub_(nullptr)
 {
 }
 
@@ -557,17 +566,17 @@ JitCompartment::mark(JSTracer *trc, JSCo
     // Cancel any active or pending off thread compilations. Note that the
     // MIR graph does not hold any nursery pointers, so there's no need to
     // do this for minor GCs.
     JS_ASSERT(!trc->runtime->isHeapMinorCollecting());
     CancelOffThreadIonCompile(compartment, nullptr);
     FinishAllOffThreadCompilations(compartment);
 
     // Free temporary OSR buffer.
-    rt->freeOsrTempData();
+    trc->runtime->jitRuntime()->freeOsrTempData();
 }
 
 void
 JitCompartment::sweep(FreeOp *fop)
 {
     stubCodes_->sweep(fop);
 
     // If the sweep removed the ICCall_Fallback stub, nullptr the baselineCallReturnAddr_ field.
@@ -2697,24 +2706,16 @@ jit::FinishInvalidation(FreeOp *fop, JSS
     if (script->hasIonScript())
         FinishInvalidationOf(fop, script, script->ionScript(), false);
 
     if (script->hasParallelIonScript())
         FinishInvalidationOf(fop, script, script->parallelIonScript(), true);
 }
 
 void
-jit::FinishDiscardJitCode(FreeOp *fop, JSCompartment *comp)
-{
-    // Free optimized baseline stubs.
-    if (comp->jitCompartment())
-        comp->jitCompartment()->optimizedStubSpace()->free();
-}
-
-void
 jit::MarkValueFromIon(JSRuntime *rt, Value *vp)
 {
     gc::MarkValueUnbarriered(&rt->gcMarker, vp, "write barrier");
 }
 
 void
 jit::MarkShapeFromIon(JSRuntime *rt, Shape **shapep)
 {
@@ -2913,18 +2914,9 @@ AutoDebugModeInvalidation::~AutoDebugMod
             FinishInvalidation(fop, script);
             FinishDiscardBaselineScript(fop, script);
             // script->clearAnalysis();
             script->resetUseCount();
         } else if (script->hasBaselineScript()) {
             script->baselineScript()->resetActive();
         }
     }
-
-    if (comp_) {
-        FinishDiscardJitCode(fop, comp_);
-    } else {
-        for (CompartmentsInZoneIter comp(zone_); !comp.done(); comp.next()) {
-            if (comp->principals)
-                FinishDiscardJitCode(fop, comp);
-        }
-    }
 }
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -1642,52 +1642,41 @@ TryEliminateTypeBarrierFromTest(MTypeBar
     // Disregard the possible unbox added before the Typebarrier for checking.
     MDefinition *input = barrier->input();
     MUnbox *inputUnbox = nullptr;
     if (input->isUnbox() && input->toUnbox()->mode() != MUnbox::Fallible) {
         inputUnbox = input->toUnbox();
         input = inputUnbox->input();
     }
 
-    if (test->getOperand(0) == input && direction == TRUE_BRANCH) {
-        *eliminated = true;
-        if (inputUnbox)
-            inputUnbox->makeInfallible();
-        barrier->replaceAllUsesWith(barrier->input());
+    MDefinition *subject = nullptr;
+    bool removeUndefined;
+    bool removeNull;
+    test->filtersUndefinedOrNull(direction == TRUE_BRANCH, &subject, &removeUndefined, &removeNull);
+
+    // The test doesn't filter undefined or null.
+    if (!subject)
         return;
-    }
 
-    if (!test->getOperand(0)->isCompare())
+    // Make sure the subject equals the input to the TypeBarrier.
+    if (subject != input)
         return;
 
-    MCompare *compare = test->getOperand(0)->toCompare();
-    MCompare::CompareType compareType = compare->compareType();
-
-    if (compareType != MCompare::Compare_Undefined && compareType != MCompare::Compare_Null)
-        return;
-    if (compare->getOperand(0) != input)
+    // When the TypeBarrier filters undefined, the test must do so as well
+    // before the TypeBarrier can be removed.
+    if (!removeUndefined && filtersUndefined)
         return;
 
-    JSOp op = compare->jsop();
-    JS_ASSERT(op == JSOP_EQ || op == JSOP_STRICTEQ ||
-              op == JSOP_NE || op == JSOP_STRICTNE);
-
-    if ((direction == TRUE_BRANCH) != (op == JSOP_NE || op == JSOP_STRICTNE))
+    // When the TypeBarrier filters null, the test must do so as well
+    // before the TypeBarrier can be removed.
+    if (!removeNull && filtersNull)
         return;
 
-    // A test 'if (x.f != null)' or 'if (x.f != undefined)' filters both null
-    // and undefined. If strict equality is used, only the specified rhs is
-    // tested for.
-    if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE) {
-        if (compareType == MCompare::Compare_Undefined && !filtersUndefined)
-            return;
-        if (compareType == MCompare::Compare_Null && !filtersNull)
-            return;
-    }
-
+    // Eliminate the TypeBarrier. The possible TypeBarrier unboxing is kept,
+    // but made infallible.
     *eliminated = true;
     if (inputUnbox)
         inputUnbox->makeInfallible();
     barrier->replaceAllUsesWith(barrier->input());
 }
 
 static bool
 TryEliminateTypeBarrier(MTypeBarrier *barrier, bool *eliminated)
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -184,50 +184,55 @@ IonBuilder::spew(const char *message)
 static inline int32_t
 GetJumpOffset(jsbytecode *pc)
 {
     JS_ASSERT(js_CodeSpec[JSOp(*pc)].type() == JOF_JUMP);
     return GET_JUMP_OFFSET(pc);
 }
 
 IonBuilder::CFGState
-IonBuilder::CFGState::If(jsbytecode *join, MBasicBlock *ifFalse)
+IonBuilder::CFGState::If(jsbytecode *join, MTest *test)
 {
     CFGState state;
     state.state = IF_TRUE;
     state.stopAt = join;
-    state.branch.ifFalse = ifFalse;
+    state.branch.ifFalse = test->ifFalse();
+    state.branch.test = test;
     return state;
 }
 
 IonBuilder::CFGState
-IonBuilder::CFGState::IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MBasicBlock *ifFalse)
-{
+IonBuilder::CFGState::IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MTest *test)
+{
+    MBasicBlock *ifFalse = test->ifFalse();
+
     CFGState state;
     // If the end of the false path is the same as the start of the
     // false path, then the "else" block is empty and we can devolve
     // this to the IF_TRUE case. We handle this here because there is
     // still an extra GOTO on the true path and we want stopAt to point
     // there, whereas the IF_TRUE case does not have the GOTO.
     state.state = (falseEnd == ifFalse->pc())
                   ? IF_TRUE_EMPTY_ELSE
                   : IF_ELSE_TRUE;
     state.stopAt = trueEnd;
     state.branch.falseEnd = falseEnd;
     state.branch.ifFalse = ifFalse;
+    state.branch.test = test;
     return state;
 }
 
 IonBuilder::CFGState
 IonBuilder::CFGState::AndOr(jsbytecode *join, MBasicBlock *joinStart)
 {
     CFGState state;
     state.state = AND_OR;
     state.stopAt = join;
     state.branch.ifFalse = joinStart;
+    state.branch.test = nullptr;
     return state;
 }
 
 IonBuilder::CFGState
 IonBuilder::CFGState::TableSwitch(jsbytecode *exitpc, MTableSwitch *ins)
 {
     CFGState state;
     state.state = TABLE_SWITCH;
@@ -1880,16 +1885,20 @@ IonBuilder::processIfElseTrueEnd(CFGStat
     // We've reached the end of the true branch of an if-else. Don't
     // create an edge yet, just transition to parsing the false branch.
     state.state = CFGState::IF_ELSE_FALSE;
     state.branch.ifTrue = current;
     state.stopAt = state.branch.falseEnd;
     pc = state.branch.ifFalse->pc();
     setCurrentAndSpecializePhis(state.branch.ifFalse);
     graph().moveBlockToEnd(current);
+
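+    // Filter the types in the false branch, as is done for the true branch in
+    // jsop_ifeq.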
+    if (state.branch.test)
+        filterTypesAtTest(state.branch.test);
+
     return ControlStatus_Jumped;
 }
 
 IonBuilder::ControlStatus
 IonBuilder::processIfElseFalseEnd(CFGState &state)
 {
     // Update the state to have the latest block from the false path.
     state.branch.ifFalse = current;
@@ -3004,16 +3013,74 @@ IonBuilder::tableSwitch(JSOp op, jssrcno
     if (!cfgStack_.append(state))
         return ControlStatus_Error;
 
     pc = current->pc();
     return ControlStatus_Jumped;
 }
 
 bool
+IonBuilder::filterTypesAtTest(MTest *test)
+{
+    JS_ASSERT(test->ifTrue() == current || test->ifFalse() == current);
+
+    bool trueBranch = test->ifTrue() == current;
+
+    MDefinition *subject = nullptr;
+    bool removeUndefined;
+    bool removeNull;
+
+    test->filtersUndefinedOrNull(trueBranch, &subject, &removeUndefined, &removeNull);
+
+    // The test doesn't filter undefined or null.
+    if (!subject)
+        return true;
+
+    // There is no TypeSet that can get filtered.
+    if (!subject->resultTypeSet())
+        return true;
+
+    // Only do this optimization if the typeset contains null or undefined.
+    if ((!(removeUndefined && subject->resultTypeSet()->hasType(types::Type::UndefinedType())) &&
+         !(removeNull && subject->resultTypeSet()->hasType(types::Type::NullType()))))
+    {
+        return true;
+    }
+
+    // Find all values on the stack that correspond to the subject
+    // and replace them with a MIR carrying the filtered TypeSet information.
+    // Create the replacement MIR lazily upon first occurrence.
+    MDefinition *replace = nullptr;
+    for (uint32_t i = 0; i < current->stackDepth(); i++) {
+        if (current->getSlot(i) != subject)
+            continue;
+
+        // Create the replacement MIR with the filtered TypeSet.
+        if (!replace) {
+            types::TemporaryTypeSet *type =
+                subject->resultTypeSet()->filter(alloc_->lifoAlloc(), removeUndefined,
+                                                                      removeNull);
+            if (!type)
+                return false;
+
+            replace = ensureDefiniteTypeSet(subject, type);
+            // To make sure we don't hoist it above the MTest, use the
+            // 'dependency' field of an MInstruction. This is normally used by
+            // Alias Analysis, but won't get overwritten here, since this
+            // instruction doesn't have an AliasSet.
+            replace->setDependency(test);
+        }
+
+        current->setSlot(i, replace);
+    }
+
+    return true;
+}
+
+bool
 IonBuilder::jsop_label()
 {
     JS_ASSERT(JSOp(*pc) == JSOP_LABEL);
 
     jsbytecode *endpc = pc + GET_JUMP_OFFSET(pc);
     JS_ASSERT(endpc > pc);
 
     ControlFlowInfo label(cfgStack_.length(), endpc);
@@ -3421,17 +3488,17 @@ IonBuilder::jsop_ifeq(JSOp op)
     //    ...
     // Z: ...     ; join
     //
     // We want to parse the bytecode as if we were parsing the AST, so for the
     // IF_ELSE/COND cases, we use the source note and follow the GOTO. For the
     // IF case, the IFEQ offset is the join point.
     switch (SN_TYPE(sn)) {
       case SRC_IF:
-        if (!cfgStack_.append(CFGState::If(falseStart, ifFalse)))
+        if (!cfgStack_.append(CFGState::If(falseStart, test)))
             return false;
         break;
 
       case SRC_IF_ELSE:
       case SRC_COND:
       {
         // Infer the join point from the JSOP_GOTO[X] sitting here, then
         // assert as we much we can that this is the right GOTO.
@@ -3440,29 +3507,32 @@ IonBuilder::jsop_ifeq(JSOp op)
         JS_ASSERT(trueEnd < falseStart);
         JS_ASSERT(JSOp(*trueEnd) == JSOP_GOTO);
         JS_ASSERT(!info().getNote(gsn, trueEnd));
 
         jsbytecode *falseEnd = trueEnd + GetJumpOffset(trueEnd);
         JS_ASSERT(falseEnd > trueEnd);
         JS_ASSERT(falseEnd >= falseStart);
 
-        if (!cfgStack_.append(CFGState::IfElse(trueEnd, falseEnd, ifFalse)))
+        if (!cfgStack_.append(CFGState::IfElse(trueEnd, falseEnd, test)))
             return false;
         break;
       }
 
       default:
         MOZ_ASSUME_UNREACHABLE("unexpected source note type");
     }
 
     // Switch to parsing the true branch. Note that no PC update is needed,
     // it's the next instruction.
     setCurrentAndSpecializePhis(ifTrue);
 
+    // Filter the types in the true branch.
+    filterTypesAtTest(test);
+
     return true;
 }
 
 bool
 IonBuilder::jsop_try()
 {
     JS_ASSERT(JSOp(*pc) == JSOP_TRY);
 
@@ -4592,49 +4662,41 @@ IonBuilder::createDeclEnvObject(MDefinit
 
 MInstruction *
 IonBuilder::createCallObject(MDefinition *callee, MDefinition *scope)
 {
     // Get a template CallObject that we'll use to generate inline object
     // creation.
     CallObject *templateObj = inspector->templateCallObject();
 
-    // If the CallObject needs dynamic slots, allocate those now.
-    MInstruction *slots;
-    if (templateObj->hasDynamicSlots()) {
-        size_t nslots = JSObject::dynamicSlotsCount(templateObj->numFixedSlots(),
-                                                    templateObj->lastProperty()->slotSpan(templateObj->getClass()),
-                                                    templateObj->getClass());
-        slots = MNewSlots::New(alloc(), nslots);
-    } else {
-        slots = MConstant::New(alloc(), NullValue());
-    }
-    current->add(slots);
-
-    // Allocate the actual object. It is important that no intervening
-    // instructions could potentially bailout, thus leaking the dynamic slots
-    // pointer. Run-once scripts need a singleton type, so always do a VM call
-    // in such cases.
-    MNewCallObject *callObj = MNewCallObject::New(alloc(), templateObj, script()->treatAsRunOnce(), slots);
+    // Allocate the object. Run-once scripts need a singleton type, so always do
+    // a VM call in such cases.
+    MNewCallObject *callObj = MNewCallObject::New(alloc(), templateObj, script()->treatAsRunOnce());
     current->add(callObj);
 
     // Initialize the object's reserved slots. No post barrier is needed here,
     // for the same reason as in createDeclEnvObject.
     current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::enclosingScopeSlot(), scope));
     current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::calleeSlot(), callee));
 
     // Initialize argument slots.
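+    // The MSlots instruction is created lazily below, only if some aliased
+    // formal lives in a dynamic slot.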
+    MSlots *slots = nullptr;
     for (AliasedFormalIter i(script()); i; i++) {
         unsigned slot = i.scopeSlot();
         unsigned formal = i.frameIndex();
         MDefinition *param = current->getSlot(info().argSlotUnchecked(formal));
-        if (slot >= templateObj->numFixedSlots())
+        if (slot >= templateObj->numFixedSlots()) {
+            if (!slots) {
+                slots = MSlots::New(alloc(), callObj);
+                current->add(slots);
+            }
             current->add(MStoreSlot::New(alloc(), slots, slot - templateObj->numFixedSlots(), param));
-        else
+        } else {
             current->add(MStoreFixedSlot::New(alloc(), callObj, slot, param));
+        }
     }
 
     return callObj;
 }
 
 MDefinition *
 IonBuilder::createThisScripted(MDefinition *callee)
 {
@@ -6237,16 +6299,36 @@ IonBuilder::ensureDefiniteType(MDefiniti
         break;
       }
     }
 
     current->add(replace);
     return replace;
 }
 
+MDefinition *
+IonBuilder::ensureDefiniteTypeSet(MDefinition *def, types::TemporaryTypeSet *types)
+{
+    // We cannot arbitrarily add a typeset to a definition. It can be shared
+    // in another path. So we always need to create a new MIR.
+
+    // Use ensureDefiniteType to do unboxing. If unboxing happened, the
+    // typeset can be added to the newly created unbox operation.
+    MDefinition *replace = ensureDefiniteType(def, types->getKnownTypeTag());
+    if (replace != def) {
+        replace->setResultTypeSet(types);
+        return replace;
+    }
+
+    // Create a no-op MIR instruction to filter the typeset.
+    MFilterTypeSet *filter = MFilterTypeSet::New(alloc(), def, types);
+    current->add(filter);
+    return filter;
+}
+
 static size_t
 NumFixedSlots(JSObject *object)
 {
     // Note: we can't use object->numFixedSlots() here, as this will read the
     // shape and can race with the main thread if we are building off thread.
     // The allocation kind and object class (which goes through the type) can
     // be read freely, however.
     gc::AllocKind kind = object->tenuredGetAllocKind();
@@ -7154,16 +7236,46 @@ IonBuilder::jsop_getelem_dense(MDefiniti
         current->add(load);
 
         // If maybeUndefined was true, the typeset must have undefined, and
         // then either additional types or a barrier. This means we should
         // never have a typed version of LoadElementHole.
         JS_ASSERT(knownType == JSVAL_TYPE_UNKNOWN);
     }
 
+    // If the array is being converted to doubles, but we've observed
+    // just int, substitute a type set of int+double into the observed
+    // type set. The reason for this is that, in the
+    // interpreter+baseline, such arrays may consist of mixed
+    // ints/doubles, but when we enter ion code, we will be coercing
+    // all inputs to doubles. Therefore, the type barrier checking for
+    // just int is highly likely (*almost* guaranteed) to fail sooner
+    // or later. Essentially, by eagerly coercing to double, ion is
+    // making the observed types outdated. To compensate for this, we
+    // substitute a broader observed type set consisting of both ints
+    // and doubles. There is perhaps a tradeoff here, so we limit this
+    // optimization to parallel code, where it is needed to prevent
+    // perpetual bailouts in some extreme cases. (Bug 977853)
+    //
+    // NB: we have not added a MConvertElementsToDoubles MIR, so we
+    // cannot *assume* the result is a double.
+    if (executionMode == ParallelExecution &&
+        barrier &&
+        types->getKnownTypeTag() == JSVAL_TYPE_INT32 &&
+        objTypes &&
+        objTypes->convertDoubleElements(constraints()) == types::TemporaryTypeSet::AlwaysConvertToDoubles)
+    {
+        // Note: double implies int32 as well for typesets
+        types = alloc_->lifoAlloc()->new_<types::TemporaryTypeSet>(types::Type::DoubleType());
+        if (!types)
+            return false;
+
+        barrier = false; // Don't need a barrier anymore
+    }
+
     if (knownType != JSVAL_TYPE_UNKNOWN)
         load->setResultType(MIRTypeFromValueType(knownType));
 
     current->push(load);
     return pushTypeBarrier(load, types, barrier);
 }
 
 MInstruction *
@@ -7455,17 +7567,17 @@ IonBuilder::setElemTryScalarElemOfTypedO
     MDefinition *indexAsByteOffset;
     if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objTypeDescrs,
                                        &indexAsByteOffset, &canBeNeutered))
     {
         return false;
     }
 
     // Store the element
-    if (!storeScalarTypedObjectValue(obj, indexAsByteOffset, elemType, canBeNeutered, value))
+    if (!storeScalarTypedObjectValue(obj, indexAsByteOffset, elemType, canBeNeutered, false, value))
         return false;
 
     current->push(value);
 
     *emitted = true;
     return true;
 }
 
@@ -7487,16 +7599,20 @@ IonBuilder::setElemTryTypedStatic(bool *
 
     if (!object->resultTypeSet())
         return true;
     JSObject *tarrObj = object->resultTypeSet()->getSingleton();
     if (!tarrObj)
         return true;
 
     TypedArrayObject *tarr = &tarrObj->as<TypedArrayObject>();
+
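+    // Don't use the static path when the view's data is nursery-allocated,
+    // since that data pointer may move.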
+    if (gc::IsInsideNursery(tarr->runtimeFromMainThread(), tarr->viewData()))
+        return true;
+
     ArrayBufferView::ViewType viewType = (ArrayBufferView::ViewType) tarr->type();
 
     MDefinition *ptr = convertShiftToMaskForStaticTypedArray(index, viewType);
     if (!ptr)
         return true;
 
     // Emit StoreTypedArrayElementStatic.
     object->setImplicitlyUsedUnchecked();
@@ -7802,16 +7918,38 @@ IonBuilder::jsop_setelem_typed(ScalarTyp
 
     if (safety == SetElem_Normal)
         current->push(value);
 
     return resumeAfter(ins);
 }
 
 bool
+IonBuilder::jsop_setelem_typed_object(ScalarTypeDescr::Type arrayType,
+                                      SetElemSafety safety,
+                                      bool racy,
+                                      MDefinition *object, MDefinition *index, MDefinition *value)
+{
+    JS_ASSERT(safety == SetElem_Unsafe); // Can be fixed, but there has been no reason to do so yet
+
+    MInstruction *int_index = MToInt32::New(alloc(), index);
+    current->add(int_index);
+
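+    // Compute the element's byte offset: index * elemSize.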
+    size_t elemSize = ScalarTypeDescr::alignment(arrayType);
+    MMul *byteOffset = MMul::New(alloc(), int_index, constantInt(elemSize),
+                                        MIRType_Int32, MMul::Integer);
+    current->add(byteOffset);
+
+    if (!storeScalarTypedObjectValue(object, byteOffset, arrayType, false, racy, value))
+        return false;
+
+    return true;
+}
+
+bool
 IonBuilder::jsop_length()
 {
     if (jsop_length_fastPath())
         return true;
 
     PropertyName *name = info().getAtom(pc)->asPropertyName();
     return jsop_getprop(name);
 }
@@ -8978,17 +9116,17 @@ IonBuilder::setPropTryScalarPropOfTypedO
 {
     // Must always be loading the same scalar type
     ScalarTypeDescr::Type fieldType;
     if (!fieldDescrs.scalarType(&fieldType))
         return true;
 
     // OK! Perform the optimization.
 
-    if (!storeScalarTypedObjectValue(obj, constantInt(fieldOffset), fieldType, true, value))
+    if (!storeScalarTypedObjectValue(obj, constantInt(fieldOffset), fieldType, true, false, value))
         return false;
 
     current->push(value);
 
     *emitted = true;
     return true;
 }
 
@@ -9994,37 +10132,40 @@ IonBuilder::typeObjectForFieldFromStruct
     MInstruction *unboxFieldType = MUnbox::New(alloc(), fieldType, MIRType_Object, MUnbox::Infallible);
     current->add(unboxFieldType);
 
     return unboxFieldType;
 }
 
 bool
 IonBuilder::storeScalarTypedObjectValue(MDefinition *typedObj,
-                                        MDefinition *offset,
+                                        MDefinition *byteOffset,
                                         ScalarTypeDescr::Type type,
                                         bool canBeNeutered,
+                                        bool racy,
                                         MDefinition *value)
 {
     // Find location within the owner object.
     MDefinition *elements, *scaledOffset;
     size_t alignment = ScalarTypeDescr::alignment(type);
-    loadTypedObjectElements(typedObj, offset, alignment, canBeNeutered,
+    loadTypedObjectElements(typedObj, byteOffset, alignment, canBeNeutered,
                             &elements, &scaledOffset);
 
     // Clamp value to [0, 255] when type is Uint8Clamped
     MDefinition *toWrite = value;
     if (type == ScalarTypeDescr::TYPE_UINT8_CLAMPED) {
         toWrite = MClampToUint8::New(alloc(), value);
         current->add(toWrite->toInstruction());
     }
 
     MStoreTypedArrayElement *store =
         MStoreTypedArrayElement::New(alloc(), elements, scaledOffset, toWrite,
                                      type);
+    if (racy)
+        store->setRacy();
     current->add(store);
 
     return true;
 }
 
 MConstant *
 IonBuilder::constant(const Value &v)
 {
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -105,16 +105,17 @@ class IonBuilder : public MIRGenerator
         jsbytecode *stopAt;     // Bytecode at which to stop the processing loop.
 
         // For if structures, this contains branch information.
         union {
             struct {
                 MBasicBlock *ifFalse;
                 jsbytecode *falseEnd;
                 MBasicBlock *ifTrue;    // Set when the end of the true path is reached.
+                MTest *test;
             } branch;
             struct {
                 // Common entry point.
                 MBasicBlock *entry;
 
                 // Whether OSR is being performed for this loop.
                 bool osr;
 
@@ -195,18 +196,18 @@ class IonBuilder : public MIRGenerator
               case FOR_LOOP_BODY:
               case FOR_LOOP_UPDATE:
                 return true;
               default:
                 return false;
             }
         }
 
-        static CFGState If(jsbytecode *join, MBasicBlock *ifFalse);
-        static CFGState IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MBasicBlock *ifFalse);
+        static CFGState If(jsbytecode *join, MTest *test);
+        static CFGState IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MTest *test);
         static CFGState AndOr(jsbytecode *join, MBasicBlock *joinStart);
         static CFGState TableSwitch(jsbytecode *exitpc, MTableSwitch *ins);
         static CFGState CondSwitch(IonBuilder *builder, jsbytecode *exitpc, jsbytecode *defaultTarget);
         static CFGState Label(jsbytecode *exitpc);
         static CFGState Try(jsbytecode *exitpc, MBasicBlock *successor);
     };
 
     static int CmpSuccessors(const void *a, const void *b);
@@ -332,31 +333,37 @@ class IonBuilder : public MIRGenerator
     void rewriteParameters();
     bool initScopeChain(MDefinition *callee = nullptr);
     bool initArgumentsObject();
     bool pushConstant(const Value &v);
 
     MConstant *constant(const Value &v);
     MConstant *constantInt(int32_t i);
 
+    // Filter the type information at tests.
+    bool filterTypesAtTest(MTest *test);
+
     // Add a guard which ensure that the set of type which goes through this
     // generated code correspond to the observed types for the bytecode.
     bool pushTypeBarrier(MDefinition *def, types::TemporaryTypeSet *observed, bool needBarrier);
 
     // As pushTypeBarrier, but will compute the needBarrier boolean itself based
     // on observed and the JSFunction that we're planning to call. The
     // JSFunction must be a DOM method or getter.
     bool pushDOMTypeBarrier(MInstruction *ins, types::TemporaryTypeSet *observed, JSFunction* func);
 
     // If definiteType is not known or def already has the right type, just
     // returns def.  Otherwise, returns an MInstruction that has that definite
     // type, infallibly unboxing ins as needed.  The new instruction will be
     // added to |current| in this case.
     MDefinition *ensureDefiniteType(MDefinition* def, JSValueType definiteType);
 
+    // Creates an MDefinition based on the given def, improved with the given TypeSet.
+    MDefinition *ensureDefiniteTypeSet(MDefinition* def, types::TemporaryTypeSet *types);
+
     JSObject *getSingletonPrototype(JSFunction *target);
 
     MDefinition *createThisScripted(MDefinition *callee);
     MDefinition *createThisScriptedSingleton(JSFunction *target, MDefinition *callee);
     MDefinition *createThis(JSFunction *target, MDefinition *callee);
     MInstruction *createDeclEnvObject(MDefinition *callee, MDefinition *scopeObj);
     MInstruction *createCallObject(MDefinition *callee, MDefinition *scopeObj);
 
@@ -454,16 +461,17 @@ class IonBuilder : public MIRGenerator
                                  MDefinition **ownerScaledOffset);
     MDefinition *typeObjectForElementFromArrayStructType(MDefinition *typedObj);
     MDefinition *typeObjectForFieldFromStructType(MDefinition *type,
                                                   size_t fieldIndex);
     bool storeScalarTypedObjectValue(MDefinition *typedObj,
                                      MDefinition *offset,
                                      ScalarTypeDescr::Type type,
                                      bool canBeNeutered,
+                                     bool racy,
                                      MDefinition *value);
     bool checkTypedObjectIndexInBounds(size_t elemSize,
                                        MDefinition *obj,
                                        MDefinition *index,
                                        TypeDescrSet objTypeDescrs,
                                        MDefinition **indexAsByteOffset,
                                        bool *canBeNeutered);
     bool pushDerivedTypedObject(bool *emitted,
@@ -560,16 +568,19 @@ class IonBuilder : public MIRGenerator
     bool jsop_getelem_typed(MDefinition *obj, MDefinition *index, ScalarTypeDescr::Type arrayType);
     bool jsop_setelem();
     bool jsop_setelem_dense(types::TemporaryTypeSet::DoubleConversion conversion,
                             SetElemSafety safety,
                             MDefinition *object, MDefinition *index, MDefinition *value);
     bool jsop_setelem_typed(ScalarTypeDescr::Type arrayType,
                             SetElemSafety safety,
                             MDefinition *object, MDefinition *index, MDefinition *value);
+    bool jsop_setelem_typed_object(ScalarTypeDescr::Type arrayType,
+                                   SetElemSafety safety, bool racy,
+                                   MDefinition *object, MDefinition *index, MDefinition *value);
     bool jsop_length();
     bool jsop_length_fastPath();
     bool jsop_arguments();
     bool jsop_arguments_length();
     bool jsop_arguments_getelem();
     bool jsop_runonce();
     bool jsop_rest();
     bool jsop_not();
@@ -662,29 +673,33 @@ class IonBuilder : public MIRGenerator
     InliningStatus inlineRegExpExec(CallInfo &callInfo);
     InliningStatus inlineRegExpTest(CallInfo &callInfo);
 
     // Array intrinsics.
     InliningStatus inlineUnsafePutElements(CallInfo &callInfo);
     bool inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base);
     bool inlineUnsafeSetTypedArrayElement(CallInfo &callInfo, uint32_t base,
                                           ScalarTypeDescr::Type arrayType);
+    bool inlineUnsafeSetTypedObjectArrayElement(CallInfo &callInfo, uint32_t base,
+                                                ScalarTypeDescr::Type arrayType);
     InliningStatus inlineNewDenseArray(CallInfo &callInfo);
     InliningStatus inlineNewDenseArrayForSequentialExecution(CallInfo &callInfo);
     InliningStatus inlineNewDenseArrayForParallelExecution(CallInfo &callInfo);
 
     // Slot intrinsics.
     InliningStatus inlineUnsafeSetReservedSlot(CallInfo &callInfo);
     InliningStatus inlineUnsafeGetReservedSlot(CallInfo &callInfo);
 
     // ForkJoin intrinsics
     InliningStatus inlineForkJoinGetSlice(CallInfo &callInfo);
 
     // TypedObject intrinsics.
     InliningStatus inlineObjectIsTypeDescr(CallInfo &callInfo);
+    bool elementAccessIsTypedObjectArrayOfScalarType(MDefinition* obj, MDefinition* id,
+                                                     ScalarTypeDescr::Type *arrayType);
 
     // Utility intrinsics.
     InliningStatus inlineIsCallable(CallInfo &callInfo);
     InliningStatus inlineHaveSameClass(CallInfo &callInfo);
     InliningStatus inlineToObject(CallInfo &callInfo);
     InliningStatus inlineDump(CallInfo &callInfo);
     InliningStatus inlineHasClass(CallInfo &callInfo, const Class *clasp);
 
--- a/js/src/jit/IonLinker.h
+++ b/js/src/jit/IonLinker.h
@@ -73,17 +73,17 @@ class Linker
     Linker(MacroAssembler &masm)
       : masm(masm)
     {
         masm.finish();
     }
 
     template <AllowGC allowGC>
     JitCode *newCode(JSContext *cx, JSC::CodeKind kind) {
-        return newCode<allowGC>(cx, cx->compartment()->jitCompartment()->execAlloc(), kind);
+        return newCode<allowGC>(cx, cx->runtime()->jitRuntime()->execAlloc(), kind);
     }
 
     JitCode *newCodeForIonScript(JSContext *cx) {
 #ifdef JS_CODEGEN_ARM
         // ARM does not yet use implicit interrupt checks, see bug 864220.
         return newCode<CanGC>(cx, JSC::ION_CODE);
 #else
         // The caller must lock the runtime against interrupt requests, as the
--- a/js/src/jit/IonMacroAssembler.cpp
+++ b/js/src/jit/IonMacroAssembler.cpp
@@ -628,84 +628,211 @@ MacroAssembler::clampDoubleToUint8(Float
     {
         move32(Imm32(255), output);
     }
 
     bind(&done);
 #endif
 }
 
+// Inlined version of gc::CheckAllocatorState that checks the bare essentials
+// and bails for anything that cannot be handled with our jit allocators.
 void
-MacroAssembler::newGCThing(Register result, Register temp, gc::AllocKind allocKind, Label *fail,
-                           gc::InitialHeap initialHeap /* = gc::DefaultHeap */)
+MacroAssembler::checkAllocatorState(Label *fail)
 {
-    // Inlined equivalent of js::gc::NewGCThing() without failure case handling.
-
-    int thingSize = int(gc::Arena::thingSize(allocKind));
-
 #ifdef JS_GC_ZEAL
     // Don't execute the inline path if gcZeal is active.
     branch32(Assembler::NotEqual,
              AbsoluteAddress(GetIonContext()->runtime->addressOfGCZeal()), Imm32(0),
              fail);
 #endif
 
     // Don't execute the inline path if the compartment has an object metadata callback,
     // as the metadata to use for the object may vary between executions of the op.
     if (GetIonContext()->compartment->hasObjectMetadataCallback())
         jump(fail);
+}
 
+// Inline version of ShouldNurseryAllocate.
+bool
+MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap)
+{
+#ifdef JSGC_GENERATIONAL
+    // Note that Ion elides barriers on writes to objects known to be in the
+    // nursery, so any allocation that can be made into the nursery must be made
+    // into the nursery, even if the nursery is disabled. At runtime these will
+    // take the out-of-line path, which is required to insert a barrier for the
+    // initializing writes.
+    return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
+#else
+    return false;
+#endif
+}
+
+// Inline version of Nursery::allocateObject.
+void
+MacroAssembler::nurseryAllocate(Register result, Register slots, gc::AllocKind allocKind,
+                                size_t nDynamicSlots, gc::InitialHeap initialHeap, Label *fail)
+{
 #ifdef JSGC_GENERATIONAL
-    // Always use nursery allocation if it is possible to do so. The jit
-    // assumes a nursery pointer is returned to avoid barriers.
-    if (allocKind <= gc::FINALIZE_OBJECT_LAST && initialHeap != gc::TenuredHeap) {
-        // Inline Nursery::allocate. No explicit check for nursery.isEnabled()
-        // is needed, as the comparison with the nursery's end will always fail
-        // in such cases.
-        const Nursery &nursery = GetIonContext()->runtime->gcNursery();
-        loadPtr(AbsoluteAddress(nursery.addressOfPosition()), result);
-        computeEffectiveAddress(Address(result, thingSize), temp);
-        branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(nursery.addressOfCurrentEnd()), temp, fail);
-        storePtr(temp, AbsoluteAddress(nursery.addressOfPosition()));
-        return;
-    }
+    JS_ASSERT(IsNurseryAllocable(allocKind));
+    JS_ASSERT(initialHeap != gc::TenuredHeap);
+
+    // If this assertion fails, the allocation site is requesting too many
+    // dynamic slots. Several allocation sites assume that nursery allocation
+    // will succeed to avoid needing barriers later; ensure those sites limit
+    // their slot requests appropriately.
+    JS_ASSERT(nDynamicSlots < Nursery::MaxNurserySlots);
+
+    // No explicit check for nursery.isEnabled() is needed, as the comparison
+    // with the nursery's end will always fail in such cases.
+    const Nursery &nursery = GetIonContext()->runtime->gcNursery();
+    Register temp = slots;
+    int thingSize = int(gc::Arena::thingSize(allocKind));
+    int totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
+    loadPtr(AbsoluteAddress(nursery.addressOfPosition()), result);
+    computeEffectiveAddress(Address(result, totalSize), temp);
+    branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(nursery.addressOfCurrentEnd()), temp, fail);
+    storePtr(temp, AbsoluteAddress(nursery.addressOfPosition()));
+
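+    // The dynamic slots immediately follow the object within the nursery
+    // allocation.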
+    if (nDynamicSlots)
+        computeEffectiveAddress(Address(result, thingSize), slots);
 #endif // JSGC_GENERATIONAL
-
-    CompileZone *zone = GetIonContext()->compartment->zone();
+}
 
-    // Inline FreeSpan::allocate.
-    // There is always exactly one FreeSpan per allocKind per JSCompartment.
-    // If a FreeSpan is replaced, its members are updated in the freeLists table,
-    // which the code below always re-reads.
+// Inlined version of FreeSpan::allocate.
+void
+MacroAssembler::freeSpanAllocate(Register result, Register temp, gc::AllocKind allocKind, Label *fail)
+{
+    CompileZone *zone = GetIonContext()->compartment->zone();
+    int thingSize = int(gc::Arena::thingSize(allocKind));
+
+    // Load FreeSpan::first of |zone|'s freeLists for |allocKind|. If there is
+    // no room remaining in the span, we bail to finish the allocation. The
+    // interpreter will call |refillFreeLists|, setting up a new FreeSpan so
+    // that we can continue allocating in the jit.
     loadPtr(AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)), result);
     branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(zone->addressOfFreeListLast(allocKind)), result, fail);
     computeEffectiveAddress(Address(result, thingSize), temp);
     storePtr(temp, AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)));
 }
 
 void
-MacroAssembler::newGCThing(Register result, Register temp, JSObject *templateObject, Label *fail,
-                           gc::InitialHeap initialHeap)
+MacroAssembler::callMallocStub(size_t nbytes, Register result, Label *fail)
 {
-    gc::AllocKind allocKind = templateObject->tenuredGetAllocKind();
+    // This register must match the one in JitRuntime::generateMallocStub.
+    const Register regNBytes = CallTempReg0;
+
+    JS_ASSERT(nbytes > 0);
+    JS_ASSERT(nbytes <= INT32_MAX);
+
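+    // The malloc stub takes its argument and returns its result in regNBytes
+    // (CallTempReg0), so preserve the caller's value in that register if it
+    // is not the result register.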
+    if (regNBytes != result)
+        push(regNBytes);
+    move32(Imm32(nbytes), regNBytes);
+    call(GetIonContext()->runtime->jitRuntime()->mallocStub());
+    if (regNBytes != result) {
+        movePtr(regNBytes, result);
+        pop(regNBytes);
+    }
+    branchTest32(Assembler::Zero, result, result, fail);
+}
+
+void
+MacroAssembler::callFreeStub(Register slots)
+{
+    // This register must match the one in JitRuntime::generateFreeStub.
+    const Register regSlots = CallTempReg0;
+
+    push(regSlots);
+    movePtr(slots, regSlots);
+    call(GetIonContext()->runtime->jitRuntime()->freeStub());
+    pop(regSlots);
+}
+
+// Inlined equivalent of gc::AllocateObject, without failure case handling.
+void
+MacroAssembler::allocateObject(Register result, Register slots, gc::AllocKind allocKind,
+                               uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label *fail)
+{
     JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
 
-    newGCThing(result, temp, allocKind, fail, initialHeap);
+    checkAllocatorState(fail);
+
+    if (shouldNurseryAllocate(allocKind, initialHeap))
+        return nurseryAllocate(result, slots, allocKind, nDynamicSlots, initialHeap, fail);
+
+    if (!nDynamicSlots)
+        return freeSpanAllocate(result, slots, allocKind, fail);
+
+    callMallocStub(nDynamicSlots * sizeof(HeapValue), slots, fail);
+
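+    // Dynamic slots were just malloc'd into |slots|; preserve that pointer
+    // across the tenured allocation and free it if the allocation fails.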
+    Label failAlloc;
+    Label success;
+
+    push(slots);
+    freeSpanAllocate(result, slots, allocKind, &failAlloc);
+    pop(slots);
+    jump(&success);
+
+    bind(&failAlloc);
+    pop(slots);
+    callFreeStub(slots);
+    jump(fail);
+
+    bind(&success);
+}
+
+void
+MacroAssembler::newGCThing(Register result, Register temp, JSObject *templateObj,
+                            gc::InitialHeap initialHeap, Label *fail)
+{
+    // This method does not initialize the object: if external slots get
+    // allocated into |temp|, there is no easy way for us to ensure the caller
+    // frees them. Instead just assert this case does not happen.
+    JS_ASSERT(!templateObj->numDynamicSlots());
+
+    gc::AllocKind allocKind = templateObj->tenuredGetAllocKind();
+    JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
+
+    allocateObject(result, temp, allocKind, templateObj->numDynamicSlots(), initialHeap, fail);
+}
+
+void
+MacroAssembler::createGCObject(Register obj, Register temp, JSObject *templateObj,
+                               gc::InitialHeap initialHeap, Label *fail)
+{
+    uint32_t nDynamicSlots = templateObj->numDynamicSlots();
+    gc::AllocKind allocKind = templateObj->tenuredGetAllocKind();
+    JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
+
+    allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
+    initGCThing(obj, temp, templateObj);
+}
+
+
+// Inlined equivalent of gc::AllocateNonObject, without failure case handling.
+// Non-object allocation does not need to worry about slots, so it can take a
+// simpler path.
+void
+MacroAssembler::allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label *fail)
+{
+    checkAllocatorState(fail);
+    freeSpanAllocate(result, temp, allocKind, fail);
 }
 
 void
 MacroAssembler::newGCString(Register result, Register temp, Label *fail)
 {
-    newGCThing(result, temp, js::gc::FINALIZE_STRING, fail);
+    allocateNonObject(result, temp, js::gc::FINALIZE_STRING, fail);
 }
 
 void
 MacroAssembler::newGCShortString(Register result, Register temp, Label *fail)
 {
-    newGCThing(result, temp, js::gc::FINALIZE_SHORT_STRING, fail);
+    allocateNonObject(result, temp, js::gc::FINALIZE_SHORT_STRING, fail);
 }
 
 void
 MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                               gc::AllocKind allocKind, Label *fail)
 {
     // Similar to ::newGCThing(), except that it allocates from a custom
     // Allocator in the ForkJoinContext*, rather than being hardcoded to the
@@ -751,16 +878,17 @@ MacroAssembler::newGCThingPar(Register r
 }
 
 void
 MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                               JSObject *templateObject, Label *fail)
 {
     gc::AllocKind allocKind = templateObject->tenuredGetAllocKind();
     JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
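+    // Templates that require dynamic slots are not supported on this path.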
+    JS_ASSERT(!templateObject->numDynamicSlots());
 
     newGCThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
 }
 
 void
 MacroAssembler::newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                                Label *fail)
 {
@@ -770,94 +898,115 @@ MacroAssembler::newGCStringPar(Register 
 void
 MacroAssembler::newGCShortStringPar(Register result, Register cx, Register tempReg1,
                                     Register tempReg2, Label *fail)
 {
     newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_SHORT_STRING, fail);
 }
 
 void
-MacroAssembler::copySlotsFromTemplate(Register obj, Register temp, const JSObject *templateObj,
+MacroAssembler::copySlotsFromTemplate(Register obj, const JSObject *templateObj,
                                       uint32_t start, uint32_t end)
 {
     uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
     for (unsigned i = start; i < nfixed; i++)
         storeValue(templateObj->getFixedSlot(i), Address(obj, JSObject::getFixedSlotOffset(i)));
 }
 
 void
-MacroAssembler::fillSlotsWithUndefined(Register obj, Register temp, const JSObject *templateObj,
-                                       uint32_t start, uint32_t end)
+MacroAssembler::fillSlotsWithUndefined(Address base, Register temp, uint32_t start, uint32_t end)
 {
 #ifdef JS_NUNBOX32
     // We only have a single spare register, so do the initialization as two
     // strided writes of the tag and payload.
     jsval_layout jv = JSVAL_TO_IMPL(UndefinedValue());
-    uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
 
-    mov(ImmWord(jv.s.tag), temp);
-    for (unsigned i = start; i < nfixed; i++)
-        store32(temp, ToType(Address(obj, JSObject::getFixedSlotOffset(i))));
+    Address addr = base;
+    move32(Imm32(jv.s.payload.i32), temp);
+    for (unsigned i = start; i < end; ++i, addr.offset += sizeof(HeapValue))
+        store32(temp, ToPayload(addr));
 
-    mov(ImmWord(jv.s.payload.i32), temp);
-    for (unsigned i = start; i < nfixed; i++)
-        store32(temp, ToPayload(Address(obj, JSObject::getFixedSlotOffset(i))));
+    addr = base;
+    move32(Imm32(jv.s.tag), temp);
+    for (unsigned i = start; i < end; ++i, addr.offset += sizeof(HeapValue))
+        store32(temp, ToType(addr));
 #else
     moveValue(UndefinedValue(), temp);
-    uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
-    for (unsigned i = start; i < nfixed; i++)
-        storePtr(temp, Address(obj, JSObject::getFixedSlotOffset(i)));
+    for (uint32_t i = start; i < end; ++i, base.offset += sizeof(HeapValue))
+        storePtr(temp, base);
 #endif
 }
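
The two-pass JS_NUNBOX32 path above is easier to follow in a host-side sketch. The Slot struct below is a stand-in with the payload word first and the tag word second; it is not the real jsval_layout (whose field order depends on endianness), and the tag constant is arbitrary. What it shows is the point of the strided writes: each pass keeps one 32-bit immediate in the single spare register and walks the whole slot range with it.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for one boxed value slot (8 bytes): payload word, then tag word.
    struct Slot { uint32_t payload; uint32_t tag; };

    static const uint32_t TAG_UNDEFINED = 0xffffff82; // illustrative value only

    // Two strided passes, mirroring the JS_NUNBOX32 branch: first every payload,
    // then every tag, reusing one 32-bit constant per pass.
    void fillWithUndefined32(Slot *base, unsigned start, unsigned end)
    {
        for (unsigned i = start; i < end; ++i)
            base[i].payload = 0;
        for (unsigned i = start; i < end; ++i)
            base[i].tag = TAG_UNDEFINED;
    }

    int main()
    {
        Slot slots[4] = {};
        fillWithUndefined32(slots, 1, 4);
        for (const Slot &s : slots)
            printf("payload=%u tag=%08x\n", s.payload, s.tag);
        return 0;
    }
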
 
 static uint32_t
 FindStartOfUndefinedSlots(JSObject *templateObj, uint32_t nslots)
 {
     JS_ASSERT(nslots == templateObj->lastProperty()->slotSpan(templateObj->getClass()));
     JS_ASSERT(nslots > 0);
     for (uint32_t first = nslots; first != 0; --first) {
         if (templateObj->getSlot(first - 1) != UndefinedValue())
             return first;
     }
     return 0;
 }
 
 void
-MacroAssembler::initGCSlots(Register obj, Register temp, JSObject *templateObj)
+MacroAssembler::initGCSlots(Register obj, Register slots, JSObject *templateObj)
 {
     // Slots of non-array objects are required to be initialized.
     // Use the values currently in the template object.
     uint32_t nslots = templateObj->lastProperty()->slotSpan(templateObj->getClass());
     if (nslots == 0)
         return;
 
+    uint32_t nfixed = templateObj->numFixedSlots();
+    uint32_t ndynamic = templateObj->numDynamicSlots();
+
     // Attempt to group slot writes such that we minimize the amount of
     // duplicated data we need to embed in code and load into registers. In
     // general, most template object slots will be undefined except for any
     // reserved slots. Since reserved slots come first, we split the object
     // logically into independent non-UndefinedValue writes to the head and
     // duplicated writes of UndefinedValue to the tail. For the majority of
     // objects, the "tail" will be the entire slot range.
     uint32_t startOfUndefined = FindStartOfUndefinedSlots(templateObj, nslots);
-    copySlotsFromTemplate(obj, temp, templateObj, 0, startOfUndefined);
-    fillSlotsWithUndefined(obj, temp, templateObj, startOfUndefined, nslots);
+    JS_ASSERT(startOfUndefined <= nfixed); // Reserved slots must be fixed.
+
+    // Copy over any non-undefined (reserved) slots from the template.
+    copySlotsFromTemplate(obj, templateObj, 0, startOfUndefined);
+
+    // Fill the rest of the fixed slots with undefined.
+    fillSlotsWithUndefined(Address(obj, JSObject::getFixedSlotOffset(startOfUndefined)), slots,
+                           startOfUndefined, nfixed);
+
+    if (ndynamic) {
+        // We are short one register to do this elegantly. Borrow the obj
+        // register briefly for our slots base address.
+        push(obj);
+        loadPtr(Address(obj, JSObject::offsetOfSlots()), obj);
+        fillSlotsWithUndefined(Address(obj, 0), slots, 0, ndynamic);
+        pop(obj);
+    }
 }
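
The head/tail split that initGCSlots and FindStartOfUndefinedSlots implement can be modelled on the host side. In the sketch below plain ints stand in for slot Values (0 plays the role of UndefinedValue()), and the fixed/dynamic-slot distinction that the assembler version has to handle is ignored:

    #include <cstdio>
    #include <vector>

    // Scan backwards for the last non-undefined slot, as FindStartOfUndefinedSlots
    // does above; everything at or past the returned index is undefined.
    static unsigned findStartOfUndefined(const std::vector<int> &tmpl)
    {
        for (unsigned first = tmpl.size(); first != 0; --first) {
            if (tmpl[first - 1] != 0)
                return first;
        }
        return 0;
    }

    int main()
    {
        std::vector<int> tmpl = { 7, 3, 0, 0, 0 };      // reserved slots come first
        std::vector<int> obj(tmpl.size(), -1);

        unsigned startOfUndefined = findStartOfUndefined(tmpl);
        for (unsigned i = 0; i < startOfUndefined; ++i) // head: copy template values
            obj[i] = tmpl[i];
        for (unsigned i = startOfUndefined; i < obj.size(); ++i)
            obj[i] = 0;                                 // tail: one shared fill value

        for (int v : obj)
            printf("%d ", v);                           // prints: 7 3 0 0 0
        printf("\n");
        return 0;
    }
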
 
 void
-MacroAssembler::initGCThing(Register obj, Register temp, JSObject *templateObj)
+MacroAssembler::initGCThing(Register obj, Register slots, JSObject *templateObj)
 {
-    // Fast initialization of an empty object returned by NewGCThing().
+    // Fast initialization of an empty object returned by allocateObject().
 
     JS_ASSERT(!templateObj->hasDynamicElements());
 
     storePtr(ImmGCPtr(templateObj->lastProperty()), Address(obj, JSObject::offsetOfShape()));
     storePtr(ImmGCPtr(templateObj->type()), Address(obj, JSObject::offsetOfType()));
-    storePtr(ImmPtr(nullptr), Address(obj, JSObject::offsetOfSlots()));
+    if (templateObj->hasDynamicSlots())
+        storePtr(slots, Address(obj, JSObject::offsetOfSlots()));
+    else
+        storePtr(ImmPtr(nullptr), Address(obj, JSObject::offsetOfSlots()));
 
     if (templateObj->is<ArrayObject>()) {
+        Register temp = slots;
         JS_ASSERT(!templateObj->getDenseInitializedLength());
 
         int elementsOffset = JSObject::offsetOfFixedElements();
 
         computeEffectiveAddress(Address(obj, elementsOffset), temp);
         storePtr(temp, Address(obj, JSObject::offsetOfElements()));
 
         // Fill in the elements header.
@@ -870,17 +1019,17 @@ MacroAssembler::initGCThing(Register obj
         store32(Imm32(templateObj->shouldConvertDoubleElements()
                       ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
                       : 0),
                 Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
         JS_ASSERT(!templateObj->hasPrivate());
     } else {
         storePtr(ImmPtr(emptyObjectElements), Address(obj, JSObject::offsetOfElements()));
 
-        initGCSlots(obj, temp, templateObj);
+        initGCSlots(obj, slots, templateObj);
 
         if (templateObj->hasPrivate()) {
             uint32_t nfixed = templateObj->numFixedSlots();
             storePtr(ImmPtr(templateObj->getPrivate()),
                      Address(obj, JSObject::getPrivateDataOffset(nfixed)));
         }
     }
 }
--- a/js/src/jit/IonMacroAssembler.h
+++ b/js/src/jit/IonMacroAssembler.h
@@ -781,38 +781,52 @@ class MacroAssembler : public MacroAssem
         bind(&done);
     }
 
     // Emit type case branch on tag matching if the type tag in the definition
     // might actually be that type.
     void branchEqualTypeIfNeeded(MIRType type, MDefinition *maybeDef, Register tag, Label *label);
 
     // Inline allocation.
-    void newGCThing(Register result, Register temp, gc::AllocKind allocKind, Label *fail,
-                    gc::InitialHeap initialHeap = gc::DefaultHeap);
-    void newGCThing(Register result, Register temp, JSObject *templateObject, Label *fail,
-                    gc::InitialHeap initialHeap);
+  private:
+    void checkAllocatorState(Label *fail);
+    bool shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap);
+    void nurseryAllocate(Register result, Register slots, gc::AllocKind allocKind,
+                         size_t nDynamicSlots, gc::InitialHeap initialHeap, Label *fail);
+    void freeSpanAllocate(Register result, Register temp, gc::AllocKind allocKind, Label *fail);
+    void allocateObject(Register result, Register slots, gc::AllocKind allocKind,
+                        uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label *fail);
+    void allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label *fail);
+    void copySlotsFromTemplate(Register obj, const JSObject *templateObj,
+                               uint32_t start, uint32_t end);
+    void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start, uint32_t end);
+    void initGCSlots(Register obj, Register temp, JSObject *templateObj);
+
+  public:
+    void callMallocStub(size_t nbytes, Register result, Label *fail);
+    void callFreeStub(Register slots);
+    void createGCObject(Register result, Register temp, JSObject *templateObj,
+                        gc::InitialHeap initialHeap, Label *fail);
+
+    void newGCThing(Register result, Register temp, JSObject *templateObj,
+                     gc::InitialHeap initialHeap, Label *fail);
+    void initGCThing(Register obj, Register temp, JSObject *templateObj);
+
     void newGCString(Register result, Register temp, Label *fail);
     void newGCShortString(Register result, Register temp, Label *fail);
 
     void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        gc::AllocKind allocKind, Label *fail);
     void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        JSObject *templateObject, Label *fail);
     void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                         Label *fail);
     void newGCShortStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                              Label *fail);
 
-    void copySlotsFromTemplate(Register obj, Register temp, const JSObject *templateObj,
-                               uint32_t start, uint32_t end);
-    void fillSlotsWithUndefined(Register obj, Register temp, const JSObject *templateObj,
-                                uint32_t start, uint32_t end);
-    void initGCSlots(Register obj, Register temp, JSObject *templateObj);
-    void initGCThing(Register obj, Register temp, JSObject *templateObj);
 
     // Compares two strings for equality based on the JSOP.
     // This checks for identical pointers, atoms and length and fails for everything else.
     void compareStrings(JSOp op, Register left, Register right, Register result,
                         Register temp, Label *fail);
 
     // Checks the flags that signal that parallel code may need to interrupt or
     // abort.  Branches to fail in that case.
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -181,16 +181,20 @@ class JitRuntime
 
     // Thunk that invalidates an (Ion compiled) caller on the Ion stack.
     JitCode *invalidator_;
 
     // Thunk that calls the GC pre barrier.
     JitCode *valuePreBarrier_;
     JitCode *shapePreBarrier_;
 
+    // Thunks to call malloc/free.
+    JitCode *mallocStub_;
+    JitCode *freeStub_;
+
     // Thunk used by the debugger for breakpoint and step mode.
     JitCode *debugTrapHandler_;
 
     // Stub used to inline the ForkJoinGetSlice intrinsic.
     JitCode *forkJoinGetSliceStub_;
 
     // Map VMFunction addresses to the JitCode of the wrapper.
     typedef WeakCache<const VMFunction *, JitCode *> VMWrapperMap;
@@ -216,16 +220,18 @@ class JitRuntime
     JitCode *generateExceptionTailStub(JSContext *cx);
     JitCode *generateBailoutTailStub(JSContext *cx);
     JitCode *generateEnterJIT(JSContext *cx, EnterJitType type);
     JitCode *generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut);
     JitCode *generateBailoutTable(JSContext *cx, uint32_t frameClass);
     JitCode *generateBailoutHandler(JSContext *cx);
     JitCode *generateInvalidator(JSContext *cx);
     JitCode *generatePreBarrier(JSContext *cx, MIRType type);
+    JitCode *generateMallocStub(JSContext *cx);
+    JitCode *generateFreeStub(JSContext *cx);
     JitCode *generateDebugTrapHandler(JSContext *cx);
     JitCode *generateForkJoinGetSliceStub(JSContext *cx);
     JitCode *generateVMWrapper(JSContext *cx, const VMFunction &f);
 
     JSC::ExecutableAllocator *createIonAlloc(JSContext *cx);
 
   public:
     JitRuntime();
@@ -240,16 +246,20 @@ class JitRuntime
     AutoFlushCache *flusher() {
         return flusher_;
     }
     void setFlusher(AutoFlushCache *fl) {
         if (!flusher_ || !fl)
             flusher_ = fl;
     }
 
+    JSC::ExecutableAllocator *execAlloc() const {
+        return execAlloc_;
+    }
+
     JSC::ExecutableAllocator *getIonAlloc(JSContext *cx) {
         JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());
         return ionAlloc_ ? ionAlloc_ : createIonAlloc(cx);
     }
 
     JSC::ExecutableAllocator *ionAlloc(JSRuntime *rt) {
         JS_ASSERT(rt->currentThreadOwnsInterruptLock());
         return ionAlloc_;
@@ -321,42 +331,55 @@ class JitRuntime
     JitCode *valuePreBarrier() const {
         return valuePreBarrier_;
     }
 
     JitCode *shapePreBarrier() const {
         return shapePreBarrier_;
     }
 
+    JitCode *mallocStub() const {
+        return mallocStub_;
+    }
+
+    JitCode *freeStub() const {
+        return freeStub_;
+    }
+
     bool ensureForkJoinGetSliceStubExists(JSContext *cx);
     JitCode *forkJoinGetSliceStub() const {
         return forkJoinGetSliceStub_;
     }
 };
 
+class JitZone
+{
+    // Allocated space for optimized baseline stubs.
+    OptimizedICStubSpace optimizedStubSpace_;
+
+  public:
+    OptimizedICStubSpace *optimizedStubSpace() {
+        return &optimizedStubSpace_;
+    }
+};
+
 class JitCompartment
 {
     friend class JitActivation;
 
-    // Ion state for the compartment's runtime.
-    JitRuntime *rt;
-
     // Map ICStub keys to ICStub shared code objects.
     typedef WeakValueCache<uint32_t, ReadBarriered<JitCode> > ICStubCodeMap;
     ICStubCodeMap *stubCodes_;
 
     // Keep track of offset into various baseline stubs' code at return
     // point from called script.
     void *baselineCallReturnAddr_;
     void *baselineGetPropReturnAddr_;
     void *baselineSetPropReturnAddr_;
 
-    // Allocated space for optimized baseline stubs.
-    OptimizedICStubSpace optimizedStubSpace_;
-
     // Stub to concatenate two strings inline. Note that it can't be
     // stored in JitRuntime because masm.newGCString bakes in zone-specific
     // pointers. This has to be a weak pointer to avoid keeping the whole
     // compartment alive.
     ReadBarriered<JitCode> stringConcatStub_;
     ReadBarriered<JitCode> parallelStringConcatStub_;
 
     JitCode *generateStringConcatStub(JSContext *cx, ExecutionMode mode);
@@ -401,48 +424,39 @@ class JitCompartment
         return baselineSetPropReturnAddr_;
     }
 
     void toggleBaselineStubBarriers(bool enabled);
 
     JSC::ExecutableAllocator *createIonAlloc();
 
   public:
-    JitCompartment(JitRuntime *rt);
+    JitCompartment();
     ~JitCompartment();
 
     bool initialize(JSContext *cx);
 
     // Initialize code stubs only used by Ion, not Baseline.
     bool ensureIonStubsExist(JSContext *cx);
 
     void mark(JSTracer *trc, JSCompartment *compartment);
     void sweep(FreeOp *fop);
 
-    JSC::ExecutableAllocator *execAlloc() {
-        return rt->execAlloc_;
-    }
-
     JitCode *stringConcatStub(ExecutionMode mode) const {
         switch (mode) {
           case SequentialExecution: return stringConcatStub_;
           case ParallelExecution:   return parallelStringConcatStub_;
           default:                  MOZ_ASSUME_UNREACHABLE("No such execution mode");
         }
     }
-
-    OptimizedICStubSpace *optimizedStubSpace() {
-        return &optimizedStubSpace_;
-    }
 };
 
 // Called from JSCompartment::discardJitCode().
 void InvalidateAll(FreeOp *fop, JS::Zone *zone);
 void FinishInvalidation(FreeOp *fop, JSScript *script);
-void FinishDiscardJitCode(FreeOp *fop, JSCompartment *comp);
 
 // On windows systems, really large frames need to be incrementally touched.
 // The following constant defines the minimum increment of the touch.
 #ifdef XP_WIN
 const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
 #endif
 
 } // namespace jit
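
The mallocStub_/freeStub_ thunks added above hang off the JitRuntime, so jitcode in any zone can reach a shared allocation entry point without a full VM call. A rough host-side analogue follows; FakeJitRuntime and the function names are invented for the sketch and assume nothing about the generated code beyond "one malloc entry point and one free entry point per runtime":

    #include <cstdio>
    #include <cstdlib>

    // Invented stand-in: one pair of allocator entry points owned by the runtime.
    struct FakeJitRuntime {
        void *(*mallocStub)(size_t);
        void  (*freeStub)(void *);
    };

    static void *mallocStubImpl(size_t nbytes) { return std::malloc(nbytes); }
    static void  freeStubImpl(void *p)         { std::free(p); }

    int main()
    {
        FakeJitRuntime rt = { mallocStubImpl, freeStubImpl };

        // E.g. out-of-line slots for a call object: nbytes covers the dynamic slots.
        void *slots = rt.mallocStub(8 * sizeof(double));
        if (!slots) {
            // Jitcode would branch to its fail label here and take its slow path.
            std::fprintf(stderr, "allocation failed\n");
            return 1;
        }
        rt.freeStub(slots);
        return 0;
    }
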
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -297,42 +297,16 @@ class LGoto : public LControlInstruction
          setSuccessor(0, block);
     }
 
     MBasicBlock *target() const {
         return getSuccessor(0);
     }
 };
 
-class LNewSlots : public LCallInstructionHelper<1, 0, 3>
-{
-  public:
-    LIR_HEADER(NewSlots)
-
-    LNewSlots(const LDefinition &temp1, const LDefinition &temp2, const LDefinition &temp3) {
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-        setTemp(2, temp3);
-    }
-
-    const LDefinition *temp1() {
-        return getTemp(0);
-    }
-    const LDefinition *temp2() {
-        return getTemp(1);
-    }
-    const LDefinition *temp3() {
-        return getTemp(2);
-    }
-
-    MNewSlots *mir() const {
-        return mir_->toNewSlots();
-    }
-};
-
 class LNewArray : public LInstructionHelper<1, 0, 1>
 {
   public:
     LIR_HEADER(NewArray)
 
     LNewArray(const LDefinition &temp) {
         setTemp(0, temp);
     }
@@ -459,92 +433,62 @@ class LNewDeclEnvObject : public LInstru
         return getTemp(0);
     }
 
     MNewDeclEnvObject *mir() const {
         return mir_->toNewDeclEnvObject();
     }
 };
 
-// Allocates a new CallObject. The inputs are:
-//      slots: either a reg representing a HeapSlot *, or a placeholder
-//             meaning that no slots pointer is needed.
+// Allocates a new CallObject.
 //
 // This instruction generates two possible instruction sets:
 //   (1) If the call object is extensible, this is a callVM to create the
 //       call object.
 //   (2) Otherwise, an inline allocation of the call object is attempted.
 //
-class LNewCallObject : public LInstructionHelper<1, 1, 1>
+class LNewCallObject : public LInstructionHelper<1, 0, 1>
 {
   public:
     LIR_HEADER(NewCallObject)
 
-    LNewCallObject(const LAllocation &slots, const LDefinition &temp) {
-        setOperand(0, slots);
+    LNewCallObject(const LDefinition &temp) {
         setTemp(0, temp);
     }
 
     const LDefinition *temp() {
         return getTemp(0);
     }
 
-    const LAllocation *slots() {
-        return getOperand(0);
-    }
     MNewCallObject *mir() const {
         return mir_->toNewCallObject();
     }
 };
 
-class LNewCallObjectPar : public LInstructionHelper<1, 2, 2>
-{
-    LNewCallObjectPar(const LAllocation &cx, const LAllocation &slots,
-                      const LDefinition &temp1, const LDefinition &temp2)
-    {
+class LNewCallObjectPar : public LInstructionHelper<1, 1, 2>
+{
+    LNewCallObjectPar(const LAllocation &cx, const LDefinition &temp1, const LDefinition &temp2) {
         setOperand(0, cx);
-        setOperand(1, slots);
         setTemp(0, temp1);
         setTemp(1, temp2);
     }
 
 public:
     LIR_HEADER(NewCallObjectPar);
 
-    static LNewCallObjectPar *NewWithSlots(TempAllocator &alloc,
-                                           const LAllocation &cx, const LAllocation &slots,
-                                           const LDefinition &temp1, const LDefinition &temp2)
+    static LNewCallObjectPar *New(TempAllocator &alloc, const LAllocation &cx,
+                                  const LDefinition &temp1, const LDefinition &temp2)
     {
-        return new(alloc) LNewCallObjectPar(cx, slots, temp1, temp2);
-    }
-
-    static LNewCallObjectPar *NewSansSlots(TempAllocator &alloc,
-                                           const LAllocation &cx,
-                                           const LDefinition &temp1, const LDefinition &temp2)
-    {
-        LAllocation slots = LConstantIndex::Bogus();
-        return new(alloc) LNewCallObjectPar(cx, slots, temp1, temp2);
+        return new(alloc) LNewCallObjectPar(cx, temp1, temp2);
     }
 
     const LAllocation *forkJoinContext() {
         return getOperand(0);
     }
 
-    const LAllocation *slots() {
-        return getOperand(1);
-    }
-
-    const bool hasDynamicSlots() {
-        // TO INVESTIGATE: Felix tried using isRegister() method here,
-        // but for useFixed(_, CallTempN), isRegister() is false (and
-        // isUse() is true).  So for now ignore that and try to match
-        // the LConstantIndex::Bogus() generated above instead.
-        return slots() && ! slots()->isConstant();
-    }
-
     const MNewCallObjectPar *mir() const {
         return mir_->toNewCallObjectPar();
     }
 
     const LDefinition *getTemp0() {
         return getTemp(0);
     }
 
--- a/js/src/jit/LOpcodes.h
+++ b/js/src/jit/LOpcodes.h
@@ -20,17 +20,16 @@
     _(CloneLiteral)                 \
     _(Parameter)                    \
     _(Callee)                       \
     _(TableSwitch)                  \
     _(TableSwitchV)                 \
     _(Goto)                         \
     _(NewArray)                     \
     _(NewObject)                    \
-    _(NewSlots)                     \
     _(NewDeclEnvObject)             \
     _(NewCallObject)                \
     _(NewStringObject)              \
     _(NewPar)                       \
     _(NewDenseArrayPar)             \
     _(NewCallObjectPar)             \
     _(NewDerivedTypedObject)        \
     _(AbortPar)                     \
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -156,27 +156,16 @@ LIRGenerator::visitDefVar(MDefVar *ins)
 bool
 LIRGenerator::visitDefFun(MDefFun *ins)
 {
     LDefFun *lir = new(alloc()) LDefFun(useRegisterAtStart(ins->scopeChain()));
     return add(lir, ins) && assignSafepoint(lir, ins);
 }
 
 bool
-LIRGenerator::visitNewSlots(MNewSlots *ins)
-{
-    // No safepoint needed, since we don't pass a cx.
-    LNewSlots *lir = new(alloc()) LNewSlots(tempFixed(CallTempReg0), tempFixed(CallTempReg1),
-                                            tempFixed(CallTempReg2));
-    if (!assignSnapshot(lir))
-        return false;
-    return defineReturn(lir, ins);
-}
-
-bool
 LIRGenerator::visitNewArray(MNewArray *ins)
 {
     LNewArray *lir = new(alloc()) LNewArray(temp());
     return define(lir, ins) && assignSafepoint(lir, ins);
 }
 
 bool
 LIRGenerator::visitNewObject(MNewObject *ins)
@@ -190,23 +179,17 @@ LIRGenerator::visitNewDeclEnvObject(MNew
 {
     LNewDeclEnvObject *lir = new(alloc()) LNewDeclEnvObject(temp());
     return define(lir, ins) && assignSafepoint(lir, ins);
 }
 
 bool
 LIRGenerator::visitNewCallObject(MNewCallObject *ins)
 {
-    LAllocation slots;
-    if (ins->slots()->type() == MIRType_Slots)
-        slots = useRegister(ins->slots());
-    else
-        slots = LConstantIndex::Bogus();
-
-    LNewCallObject *lir = new(alloc()) LNewCallObject(slots, temp());
+    LNewCallObject *lir = new(alloc()) LNewCallObject(temp());
     if (!define(lir, ins))
         return false;
 
     if (!assignSafepoint(lir, ins))
         return false;
 
     return true;
 }
@@ -220,27 +203,17 @@ LIRGenerator::visitNewDerivedTypedObject
                                             useRegisterAtStart(ins->offset()));
     return defineReturn(lir, ins) && assignSafepoint(lir, ins);
 }
 
 bool
 LIRGenerator::visitNewCallObjectPar(MNewCallObjectPar *ins)
 {
     const LAllocation &parThreadContext = useRegister(ins->forkJoinContext());
-    const LDefinition &temp1 = temp();
-    const LDefinition &temp2 = temp();
-
-    LNewCallObjectPar *lir;
-    if (ins->slots()->type() == MIRType_Slots) {
-        const LAllocation &slots = useRegister(ins->slots());
-        lir = LNewCallObjectPar::NewWithSlots(alloc(), parThreadContext, slots, temp1, temp2);
-    } else {
-        lir = LNewCallObjectPar::NewSansSlots(alloc(), parThreadContext, temp1, temp2);
-    }
-
+    LNewCallObjectPar *lir = LNewCallObjectPar::New(alloc(), parThreadContext, temp(), temp());
     return define(lir, ins);
 }
 
 bool
 LIRGenerator::visitNewStringObject(MNewStringObject *ins)
 {
     JS_ASSERT(ins->input()->type() == MIRType_String);
 
@@ -2201,16 +2174,22 @@ LIRGenerator::visitStoreSlot(MStoreSlot 
         return add(new(alloc()) LStoreSlotT(useRegister(ins->slots()), useRegisterOrConstant(ins->value())),
                    ins);
     }
 
     return true;
 }
 
 bool
+LIRGenerator::visitFilterTypeSet(MFilterTypeSet *ins)
+{
+    return redefine(ins, ins->input());
+}
+
+bool
 LIRGenerator::visitTypeBarrier(MTypeBarrier *ins)
 {
     // Requesting a non-GC pointer is safe here since we never re-enter C++
     // from inside a type barrier test.
 
     const types::TemporaryTypeSet *types = ins->resultTypeSet();
     bool needTemp = !types->unknownObject() && types->getObjectCount() > 0;
 
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -61,17 +61,16 @@ class LIRGenerator : public LIRGenerator
 
     // Visitor hooks are explicit, to give CPU-specific versions a chance to
     // intercept without a bunch of explicit gunk in the .cpp.
     bool visitCloneLiteral(MCloneLiteral *ins);
     bool visitParameter(MParameter *param);
     bool visitCallee(MCallee *callee);
     bool visitGoto(MGoto *ins);
     bool visitTableSwitch(MTableSwitch *tableswitch);
-    bool visitNewSlots(MNewSlots *ins);
     bool visitNewArray(MNewArray *ins);
     bool visitNewObject(MNewObject *ins);
     bool visitNewDeclEnvObject(MNewDeclEnvObject *ins);
     bool visitNewCallObject(MNewCallObject *ins);
     bool visitNewStringObject(MNewStringObject *ins);
     bool visitNewDerivedTypedObject(MNewDerivedTypedObject *ins);
     bool visitNewPar(MNewPar *ins);
     bool visitNewCallObjectPar(MNewCallObjectPar *ins);
@@ -161,16 +160,17 @@ class LIRGenerator : public LIRGenerator
     bool visitMaybeToDoubleElement(MMaybeToDoubleElement *ins);
     bool visitLoadSlot(MLoadSlot *ins);
     bool visitFunctionEnvironment(MFunctionEnvironment *ins);
     bool visitForkJoinContext(MForkJoinContext *ins);
     bool visitGuardThreadExclusive(MGuardThreadExclusive *ins);
     bool visitInterruptCheck(MInterruptCheck *ins);
     bool visitInterruptCheckPar(MInterruptCheckPar *ins);
     bool visitStoreSlot(MStoreSlot *ins);
+    bool visitFilterTypeSet(MFilterTypeSet *ins);
     bool visitTypeBarrier(MTypeBarrier *ins);
     bool visitMonitorTypes(MMonitorTypes *ins);
     bool visitPostWriteBarrier(MPostWriteBarrier *ins);
     bool visitArrayLength(MArrayLength *ins);
     bool visitSetArrayLength(MSetArrayLength *ins);
     bool visitTypedArrayLength(MTypedArrayLength *ins);
     bool visitTypedArrayElements(MTypedArrayElements *ins);
     bool visitNeuterCheck(MNeuterCheck *lir);
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -1278,20 +1278,21 @@ IonBuilder::inlineUnsafePutElements(Call
         bool writeNeedsBarrier = false;
         if (isDenseNative) {
             writeNeedsBarrier = PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current,
                                                               &obj, nullptr, &elem,
                                                               /* canModify = */ false);
         }
 
         // We can only inline setelem on dense arrays that do not need type
-        // barriers and on typed arrays.
+        // barriers, on typed arrays, and on typed object arrays.
         ScalarTypeDescr::Type arrayType;
         if ((!isDenseNative || writeNeedsBarrier) &&
-            !ElementAccessIsTypedArray(obj, id, &arrayType))
+            !ElementAccessIsTypedArray(obj, id, &arrayType) &&
+            !elementAccessIsTypedObjectArrayOfScalarType(obj, id, &arrayType))
         {
             return InliningStatus_NotInlined;
         }
     }
 
     callInfo.setImplicitlyUsedUnchecked();
 
     // Push the result first so that the stack depth matches up for
@@ -1315,23 +1316,58 @@ IonBuilder::inlineUnsafePutElements(Call
 
         ScalarTypeDescr::Type arrayType;
         if (ElementAccessIsTypedArray(obj, id, &arrayType)) {
             if (!inlineUnsafeSetTypedArrayElement(callInfo, base, arrayType))
                 return InliningStatus_Error;
             continue;
         }
 
+        if (elementAccessIsTypedObjectArrayOfScalarType(obj, id, &arrayType)) {
+            if (!inlineUnsafeSetTypedObjectArrayElement(callInfo, base, arrayType))
+                return InliningStatus_Error;
+            continue;
+        }
+
         MOZ_ASSUME_UNREACHABLE("Element access not dense array nor typed array");
     }
 
     return InliningStatus_Inlined;
 }
 
 bool
+IonBuilder::elementAccessIsTypedObjectArrayOfScalarType(MDefinition* obj, MDefinition* id,
+                                                        ScalarTypeDescr::Type *arrayType)
+{
+    if (obj->type() != MIRType_Object) // lookupTypeDescrSet() tests for TypedObject
+        return false;
+
+    if (id->type() != MIRType_Int32 && id->type() != MIRType_Double)
+        return false;
+
+    TypeDescrSet objDescrs;
+    if (!lookupTypeDescrSet(obj, &objDescrs))
+        return false;
+
+    if (!objDescrs.allOfArrayKind())
+        return false;
+
+    TypeDescrSet elemDescrs;
+    if (!objDescrs.arrayElementType(*this, &elemDescrs))
+        return false;
+
+    if (elemDescrs.empty() || elemDescrs.kind() != TypeDescr::Scalar)
+        return false;
+
+    JS_ASSERT(TypeDescr::isSized(elemDescrs.kind()));
+
+    return elemDescrs.scalarType(arrayType);
+}
+
+bool
 IonBuilder::inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base)
 {
     // Note: we do not check the conditions that are asserted as true
     // in intrinsic_UnsafePutElements():
     // - arr is a dense array
     // - idx < initialized length
     // Furthermore, note that inlineUnsafePutElements ensures the type of the
     // value is reflected in the JSID_VOID property of the array.
@@ -1362,16 +1398,36 @@ IonBuilder::inlineUnsafeSetTypedArrayEle
     MDefinition *elem = callInfo.getArg(base + 2);
 
     if (!jsop_setelem_typed(arrayType, SetElem_Unsafe, obj, id, elem))
         return false;
 
     return true;
 }
 
+bool
+IonBuilder::inlineUnsafeSetTypedObjectArrayElement(CallInfo &callInfo,
+                                                   uint32_t base,
+                                                   ScalarTypeDescr::Type arrayType)
+{
+    // Note: we do not check the conditions that are asserted as true
+    // in intrinsic_UnsafePutElements():
+    // - arr is a typed array
+    // - idx < length
+
+    MDefinition *obj = callInfo.getArg(base + 0);
+    MDefinition *id = callInfo.getArg(base + 1);
+    MDefinition *elem = callInfo.getArg(base + 2);
+
+    if (!jsop_setelem_typed_object(arrayType, SetElem_Unsafe, true, obj, id, elem))
+        return false;
+
+    return true;
+}
+
 IonBuilder::InliningStatus
 IonBuilder::inlineForceSequentialOrInParallelSection(CallInfo &callInfo)
 {
     if (callInfo.constructing())
         return InliningStatus_NotInlined;
 
     ExecutionMode executionMode = info().executionMode();
     switch (executionMode) {
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -220,16 +220,22 @@ MaybeCallable(MDefinition *op)
 
     types::TemporaryTypeSet *types = op->resultTypeSet();
     if (!types)
         return true;
 
     return types->maybeCallable();
 }
 
+MTest *
+MTest::New(TempAllocator &alloc, MDefinition *ins, MBasicBlock *ifTrue, MBasicBlock *ifFalse)
+{
+    return new(alloc) MTest(ins, ifTrue, ifFalse);
+}
+
 void
 MTest::infer()
 {
     JS_ASSERT(operandMightEmulateUndefined());
 
     if (!MaybeEmulatesUndefined(getOperand(0)))
         markOperandCantEmulateUndefined();
 }
@@ -241,16 +247,42 @@ MTest::foldsTo(TempAllocator &alloc, boo
 
     if (op->isNot())
         return MTest::New(alloc, op->toNot()->operand(), ifFalse(), ifTrue());
 
     return this;
 }
 
 void
+MTest::filtersUndefinedOrNull(bool trueBranch, MDefinition **subject, bool *filtersUndefined,
+                              bool *filtersNull)
+{
+    MDefinition *ins = getOperand(0);
+    if (ins->isCompare()) {
+        ins->toCompare()->filtersUndefinedOrNull(trueBranch, subject, filtersUndefined, filtersNull);
+        return;
+    }
+
+    if (!trueBranch && ins->isNot()) {
+        *subject = ins->getOperand(0);
+        *filtersUndefined = *filtersNull = true;
+        return;
+    }
+
+    if (trueBranch) {
+        *subject = ins;
+        *filtersUndefined = *filtersNull = true;
+        return;
+    }
+
+    *filtersUndefined = *filtersNull = false;
+    *subject = nullptr;
+}
+
+void
 MDefinition::printOpcode(FILE *fp) const
 {
     PrintOpcodeName(fp, op());
     for (size_t j = 0, e = numOperands(); j < e; j++) {
         fprintf(fp, " ");
         getOperand(j)->printName(fp);
     }
 }
@@ -821,22 +853,16 @@ MRound::trySpecializeFloat32(TempAllocat
         if (input()->type() == MIRType_Float32)
             ConvertDefinitionToDouble<0>(alloc, input(), this);
         return;
     }
 
     setPolicyType(MIRType_Float32);
 }
 
-MTest *
-MTest::New(TempAllocator &alloc, MDefinition *ins, MBasicBlock *ifTrue, MBasicBlock *ifFalse)
-{
-    return new(alloc) MTest(ins, ifTrue, ifFalse);
-}
-
 MCompare *
 MCompare::New(TempAllocator &alloc, MDefinition *left, MDefinition *right, JSOp op)
 {
     return new(alloc) MCompare(left, right, op);
 }
 
 MCompare *
 MCompare::NewAsmJS(TempAllocator &alloc, MDefinition *left, MDefinition *right, JSOp op,
@@ -2563,16 +2589,47 @@ MCompare::trySpecializeFloat32(TempAlloc
         if (lhs->type() == MIRType_Float32)
             ConvertDefinitionToDouble<0>(alloc, lhs, this);
         if (rhs->type() == MIRType_Float32)
             ConvertDefinitionToDouble<1>(alloc, rhs, this);
     }
 }
 
 void
+MCompare::filtersUndefinedOrNull(bool trueBranch, MDefinition **subject, bool *filtersUndefined,
+                                 bool *filtersNull)
+{
+    *filtersNull = *filtersUndefined = false;
+    *subject = nullptr;
+
+    if (compareType() != Compare_Undefined && compareType() != Compare_Null)
+        return;
+
+    JS_ASSERT(jsop() == JSOP_STRICTNE || jsop() == JSOP_NE ||
+              jsop() == JSOP_STRICTEQ || jsop() == JSOP_EQ);
+
+    // JSOP_*NE only removes undefined/null from if/true branch
+    if (!trueBranch && (jsop() == JSOP_STRICTNE || jsop() == JSOP_NE))
+        return;
+
+    // JSOP_*EQ only removes undefined/null from else/false branch
+    if (trueBranch && (jsop() == JSOP_STRICTEQ || jsop() == JSOP_EQ))
+        return;
+
+    if (jsop() == JSOP_STRICTEQ || jsop() == JSOP_STRICTNE) {
+        *filtersUndefined = compareType() == Compare_Undefined;
+        *filtersNull = compareType() == Compare_Null;
+    } else {
+        *filtersUndefined = *filtersNull = true;
+    }
+
+    *subject = lhs();
+}
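
The branch-filtering rules above fit in a small decision table. The standalone model below uses bools in place of the JSOP_* opcodes and CompareType values (all names are stand-ins, not IonMonkey API), and only covers comparisons against undefined or null, as the early return above requires:

    #include <cstdio>

    struct Filtered { bool undefined; bool null; };

    // trueBranch: which successor of the test is being refined.
    // isEq: models JSOP_EQ/JSOP_STRICTEQ (false models JSOP_NE/JSOP_STRICTNE).
    // isStrict: models the strict variants.
    // comparesToUndefined: models Compare_Undefined (false models Compare_Null).
    Filtered filters(bool trueBranch, bool isEq, bool isStrict, bool comparesToUndefined)
    {
        Filtered f = { false, false };

        if (!trueBranch && !isEq)   // != / !== only filters on the true branch
            return f;
        if (trueBranch && isEq)     // == / === only filters on the false branch
            return f;

        if (isStrict) {
            f.undefined = comparesToUndefined;
            f.null = !comparesToUndefined;
        } else {
            f.undefined = f.null = true; // loose compare to undefined/null rules out both
        }
        return f;
    }

    int main()
    {
        Filtered a = filters(true, false, true, true);   // true branch of (x !== undefined)
        Filtered b = filters(true, false, false, false); // true branch of (x != null)
        printf("x !== undefined, true branch: undefined=%d null=%d\n", a.undefined, a.null);
        printf("x != null,       true branch: undefined=%d null=%d\n", b.undefined, b.null);
        return 0;
    }
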
+
+void
 MNot::infer()
 {
     JS_ASSERT(operandMightEmulateUndefined());
 
     if (!MaybeEmulatesUndefined(getOperand(0)))
         markOperandCantEmulateUndefined();
 }
 
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -1316,16 +1316,18 @@ class MTest
         return this;
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
     void infer();
     MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    void filtersUndefinedOrNull(bool trueBranch, MDefinition **subject, bool *filtersUndefined,
+                                bool *filtersNull);
 
     void markOperandCantEmulateUndefined() {
         operandMightEmulateUndefined_ = false;
     }
     bool operandMightEmulateUndefined() const {
         return operandMightEmulateUndefined_;
     }
 #ifdef DEBUG
@@ -2273,16 +2275,18 @@ class MCompare
     INSTRUCTION_HEADER(Compare)
     static MCompare *New(TempAllocator &alloc, MDefinition *left, MDefinition *right, JSOp op);
     static MCompare *NewAsmJS(TempAllocator &alloc, MDefinition *left, MDefinition *right, JSOp op,
                               CompareType compareType);
 
     bool tryFold(bool *result);
     bool evaluateConstantOperands(bool *result);
     MDefinition *foldsTo(TempAllocator &alloc, bool useValueNumbers);
+    void filtersUndefinedOrNull(bool trueBranch, MDefinition **subject, bool *filtersUndefined,
+                                bool *filtersNull);
 
     void infer(BaselineInspector *inspector, jsbytecode *pc);
     CompareType compareType() const {
         return compareType_;
     }
     bool isInt32Comparison() const {
         return compareType() == Compare_Int32 ||
                compareType() == Compare_Int32MaybeCoerceBoth ||
@@ -8801,16 +8805,47 @@ class MGuardThreadExclusive
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
     bool possiblyCalls() const {
         return true;
     }
 };
 
+class MFilterTypeSet
+  : public MUnaryInstruction
+{
+    MFilterTypeSet(MDefinition *def, types::TemporaryTypeSet *types)
+      : MUnaryInstruction(def)
+    {
+        JS_ASSERT(!types->unknown());
+
+        MIRType type = MIRTypeFromValueType(types->getKnownTypeTag());
+        setResultType(type);
+        setResultTypeSet(types);
+    }
+
+  public:
+    INSTRUCTION_HEADER(FilterTypeSet)
+
+    static MFilterTypeSet *New(TempAllocator &alloc, MDefinition *def, types::TemporaryTypeSet *types) {
+        return new(alloc) MFilterTypeSet(def, types);
+    }
+
+    bool congruentTo(MDefinition *def) const {
+        return false;
+    }
+    AliasSet getAliasSet() const {
+        return AliasSet::None();
+    }
+    virtual bool neverHoist() const {
+        return resultTypeSet()->empty();
+    }
+};
+
 // Given a value, guard that the value is in a particular TypeSet, then returns
 // that value.
 class MTypeBarrier
   : public MUnaryInstruction,
     public TypeBarrierPolicy
 {
     MTypeBarrier(MDefinition *def, types::TemporaryTypeSet *types)
       : MUnaryInstruction(def)
@@ -8927,43 +8962,16 @@ class MPostWriteBarrier : public MBinary
     bool isConsistentFloat32Use(MUse *use) const {
         // During lowering, values that neither have object nor value MIR type
         // are ignored, thus Float32 can show up at this point without any issue.
         return use->index() == 1;
     }
 #endif
 };
 
-class MNewSlots : public MNullaryInstruction
-{
-    unsigned nslots_;
-
-    MNewSlots(unsigned nslots)
-      : nslots_(nslots)
-    {
-        setResultType(MIRType_Slots);
-    }
-
-  public:
-    INSTRUCTION_HEADER(NewSlots)
-
-    static MNewSlots *New(TempAllocator &alloc, unsigned nslots) {
-        return new(alloc) MNewSlots(nslots);
-    }
-    unsigned nslots() const {
-        return nslots_;
-    }
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const {
-        return true;
-    }
-};
-
 class MNewDeclEnvObject : public MNullaryInstruction
 {
     CompilerRootObject templateObj_;
 
     MNewDeclEnvObject(JSObject *templateObj)
       : MNullaryInstruction(),
         templateObj_(templateObj)
     {
@@ -8980,78 +8988,70 @@ class MNewDeclEnvObject : public MNullar
     JSObject *templateObj() {
         return templateObj_;
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 };
 
-class MNewCallObject : public MUnaryInstruction
+class MNewCallObject : public MNullaryInstruction
 {
     CompilerRootObject templateObj_;
     bool needsSingletonType_;
 
-    MNewCallObject(JSObject *templateObj, bool needsSingletonType, MDefinition *slots)
-      : MUnaryInstruction(slots),
+    MNewCallObject(JSObject *templateObj, bool needsSingletonType)
+      : MNullaryInstruction(),
         templateObj_(templateObj),
         needsSingletonType_(needsSingletonType)
     {
         setResultType(MIRType_Object);
     }
 
   public:
     INSTRUCTION_HEADER(NewCallObject)
 
-    static MNewCallObject *New(TempAllocator &alloc, JSObject *templateObj, bool needsSingletonType,
-                               MDefinition *slots)
-    {
-        return new(alloc) MNewCallObject(templateObj, needsSingletonType, slots);
-    }
-
-    MDefinition *slots() {
-        return getOperand(0);
-    }
+    static MNewCallObject *New(TempAllocator &alloc, JSObject *templateObj, bool needsSingletonType)
+    {
+        return new(alloc) MNewCallObject(templateObj, needsSingletonType);
+    }
+
     JSObject *templateObject() {
         return templateObj_;
     }
     bool needsSingletonType() const {
         return needsSingletonType_;
     }
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 };
 
-class MNewCallObjectPar : public MBinaryInstruction
+class MNewCallObjectPar : public MUnaryInstruction
 {
     CompilerRootObject templateObj_;
 
-    MNewCallObjectPar(MDefinition *cx, JSObject *templateObj, MDefinition *slots)
-        : MBinaryInstruction(cx, slots),
+    MNewCallObjectPar(MDefinition *cx, JSObject *templateObj)
+        : MUnaryInstruction(cx),
           templateObj_(templateObj)
     {
         setResultType(MIRType_Object);
     }
 
   public:
     INSTRUCTION_HEADER(NewCallObjectPar);
 
     static MNewCallObjectPar *New(TempAllocator &alloc, MDefinition *cx, MNewCallObject *callObj) {
-        return new(alloc) MNewCallObjectPar(cx, callObj->templateObject(), callObj->slots());
+        return new(alloc) MNewCallObjectPar(cx, callObj->templateObject());
     }
 
     MDefinition *forkJoinContext() const {
         return getOperand(0);
     }
 
-    MDefinition *slots() const {
-        return getOperand(1);
-    }
-
     JSObject *templateObj() const {
         return templateObj_;
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 };
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -80,17 +80,16 @@ namespace jit {
     _(GuardObject)                                                          \
     _(GuardString)                                                          \
     _(AssertRange)                                                          \
     _(ToDouble)                                                             \
     _(ToFloat32)                                                            \
     _(ToInt32)                                                              \
     _(TruncateToInt32)                                                      \
     _(ToString)                                                             \
-    _(NewSlots)                                                             \
     _(NewArray)                                                             \
     _(NewObject)                                                            \
     _(NewDeclEnvObject)                                                     \
     _(NewCallObject)                                                        \
     _(NewStringObject)                                                      \
     _(InitElem)                                                             \
     _(InitElemGetterSetter)                                                 \
     _(MutateProto)                                                          \
@@ -109,16 +108,17 @@ namespace jit {
     _(Slots)                                                                \
     _(Elements)                                                             \
     _(ConstantElements)                                                     \
     _(ConvertElementsToDoubles)                                             \
     _(MaybeToDoubleElement)                                                 \
     _(LoadSlot)                                                             \
     _(StoreSlot)                                                            \
     _(FunctionEnvironment)                                                  \
+    _(FilterTypeSet)                                                        \
     _(TypeBarrier)                                                          \
     _(MonitorTypes)                                                         \
     _(PostWriteBarrier)                                                     \
     _(GetPropertyCache)                                                     \
     _(GetPropertyPolymorphic)                                               \
     _(SetPropertyPolymorphic)                                               \
     _(GetElementCache)                                                      \
     _(SetElementCache)                                                      \
--- a/js/src/jit/ParallelSafetyAnalysis.cpp
+++ b/js/src/jit/ParallelSafetyAnalysis.cpp
@@ -174,17 +174,16 @@ class ParallelSafetyVisitor : public MIn
     SAFE_OP(Unbox)
     SAFE_OP(GuardObject)
     SAFE_OP(ToDouble)
     SAFE_OP(ToFloat32)
     SAFE_OP(ToInt32)
     SAFE_OP(TruncateToInt32)
     SAFE_OP(MaybeToDoubleElement)
     CUSTOM_OP(ToString)
-    SAFE_OP(NewSlots)
     CUSTOM_OP(NewArray)
     CUSTOM_OP(NewObject)
     CUSTOM_OP(NewCallObject)
     UNSAFE_OP(NewDerivedTypedObject)
     UNSAFE_OP(InitElem)
     UNSAFE_OP(InitElemGetterSetter)
     UNSAFE_OP(MutateProto)
     UNSAFE_OP(InitProp)
@@ -196,16 +195,17 @@ class ParallelSafetyVisitor : public MIn
     CUSTOM_OP(Lambda)
     UNSAFE_OP(ImplicitThis)
     SAFE_OP(Slots)
     SAFE_OP(Elements)
     SAFE_OP(ConstantElements)
     SAFE_OP(LoadSlot)
     WRITE_GUARDED_OP(StoreSlot, slots)
     SAFE_OP(FunctionEnvironment) // just a load of func env ptr
+    SAFE_OP(FilterTypeSet)
     SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
     SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
     UNSAFE_OP(PostWriteBarrier)
     SAFE_OP(GetPropertyCache)
     SAFE_OP(GetPropertyPolymorphic)
     UNSAFE_OP(SetPropertyPolymorphic)
     SAFE_OP(GetElementCache)
     WRITE_GUARDED_OP(SetElementCache, object)
@@ -521,16 +521,20 @@ bool
 ParallelSafetyVisitor::visitCreateThisWithTemplate(MCreateThisWithTemplate *ins)
 {
     return replaceWithNewPar(ins, ins->templateObject());
 }
 
 bool
 ParallelSafetyVisitor::visitNewCallObject(MNewCallObject *ins)
 {
+    if (ins->templateObject()->hasDynamicSlots()) {
+        SpewMIR(ins, "call with dynamic slots");
+        return markUnsafe();
+    }
     replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
     return true;
 }
 
 bool
 ParallelSafetyVisitor::visitLambda(MLambda *ins)
 {
     if (ins->info().singletonType || ins->info().useNewTypeForClone) {
@@ -635,21 +639,16 @@ ParallelSafetyVisitor::insertWriteGuard(
         break;
 
       case MIRType_Slots:
         switch (valueBeingWritten->op()) {
           case MDefinition::Op_Slots:
             object = valueBeingWritten->toSlots()->object();
             break;
 
-          case MDefinition::Op_NewSlots:
-            // Values produced by new slots will ALWAYS be
-            // thread-local.
-            return true;
-
           default:
             SpewMIR(writeInstruction, "cannot insert write guard for %s",
                     valueBeingWritten->opName());
             return markUnsafe();
         }
         break;
 
       case MIRType_Elements:
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -504,36 +504,26 @@ InterruptCheck(JSContext *cx)
     // afterwards which point to the interrupt handler, the next time they are
     // taken the backedges will just be reset again.
     cx->runtime()->jitRuntime()->patchIonBackedges(cx->runtime(),
                                                    JitRuntime::BackedgeLoopHeader);
 
     return CheckForInterrupt(cx);
 }
 
-HeapSlot *
-NewSlots(JSRuntime *rt, unsigned nslots)
+void *
+MallocWrapper(JSRuntime *rt, size_t nbytes)
 {
-    JS_STATIC_ASSERT(sizeof(Value) == sizeof(HeapSlot));
-
-    Value *slots = reinterpret_cast<Value *>(rt->malloc_(nslots * sizeof(Value)));
-    if (!slots)
-        return nullptr;
-
-    for (unsigned i = 0; i < nslots; i++)
-        slots[i] = UndefinedValue();
-
-    return reinterpret_cast<HeapSlot *>(slots);
+    return rt->pod_malloc<uint8_t>(nbytes);
 }
 
 JSObject *
-NewCallObject(JSContext *cx, HandleScript script,
-              HandleShape shape, HandleTypeObject type, HeapSlot *slots)
+NewCallObject(JSContext *cx, HandleScript script, HandleShape shape, HandleTypeObject type)
 {
-    JSObject *obj = CallObject::create(cx, script, shape, type, slots);
+    JSObject *obj = CallObject::create(cx, script, shape, type);
     if (!obj)
         return nullptr;
 
 #ifdef JSGC_GENERATIONAL
     // The JIT creates call objects in the nursery, so elides barriers for
     // the initializing writes. The interpreter, however, may have allocated
     // the call object tenured, so barrier as needed before re-entering.
     if (!IsInsideNursery(cx->runtime(), obj))
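
The switch from NewSlots to MallocWrapper changes who initializes the memory: the removed helper pre-filled every slot with UndefinedValue(), while the new wrapper only returns raw bytes and the jitcode fills the slots itself (fillSlotsWithUndefined in IonMacroAssembler.cpp above). A small sketch of the two behaviours, with a plain 64-bit integer standing in for a Value slot:

    #include <cstdint>
    #include <cstdlib>

    typedef uint64_t FakeSlot;   // stand-in for a HeapSlot; 0 models undefined

    // Old style (cf. the removed NewSlots): allocate and pre-fill with undefined.
    FakeSlot *newSlotsOldStyle(unsigned nslots)
    {
        FakeSlot *slots = static_cast<FakeSlot *>(std::malloc(nslots * sizeof(FakeSlot)));
        if (!slots)
            return nullptr;
        for (unsigned i = 0; i < nslots; i++)
            slots[i] = 0;
        return slots;
    }

    // New style (cf. MallocWrapper): raw allocation only; initialization is now
    // the caller's -- in practice the jitcode's -- responsibility.
    void *mallocWrapperNewStyle(size_t nbytes)
    {
        return std::malloc(nbytes);
    }

    int main()
    {
        FakeSlot *a = newSlotsOldStyle(4);
        void *b = mallocWrapperNewStyle(4 * sizeof(FakeSlot));
        std::free(a);
        std::free(b);
        return 0;
    }
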
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -612,19 +612,19 @@ JSObject *ArrayConcatDense(JSContext *cx
 bool CharCodeAt(JSContext *cx, HandleString str, int32_t index, uint32_t *code);
 JSFlatString *StringFromCharCode(JSContext *cx, int32_t code);
 
 bool SetProperty(JSContext *cx, HandleObject obj, HandlePropertyName name, HandleValue value,
                  bool strict, jsbytecode *pc);
 
 bool InterruptCheck(JSContext *cx);
 
-HeapSlot *NewSlots(JSRuntime *rt, unsigned nslots);
+void *MallocWrapper(JSRuntime *rt, size_t nbytes);
 JSObject *NewCallObject(JSContext *cx, HandleScript script,
-                        HandleShape shape, HandleTypeObject type, HeapSlot *slots);
+                        HandleShape shape, HandleTypeObject type);
 JSObject *NewStringObject(JSContext *cx, HandleString str);
 
 bool SPSEnter(JSContext *cx, HandleScript script);
 bool SPSExit(JSContext *cx, HandleScript script);
 
 bool OperatorIn(JSContext *cx, HandleValue key, HandleObject obj, bool *out);
 bool OperatorInI(JSContext *cx, uint32_t index, HandleObject obj, bool *out);
 
--- a/js/src/jit/mips/Assembler-mips.h
+++ b/js/src/jit/mips/Assembler-mips.h
@@ -50,16 +50,17 @@ static MOZ_CONSTEXPR_VAR Register t9 = {
 static MOZ_CONSTEXPR_VAR Register k0 = { Registers::k0 };
 static MOZ_CONSTEXPR_VAR Register k1 = { Registers::k1 };
 static MOZ_CONSTEXPR_VAR Register gp = { Registers::gp };
 static MOZ_CONSTEXPR_VAR Register sp = { Registers::sp };
 static MOZ_CONSTEXPR_VAR Register fp = { Registers::fp };
 static MOZ_CONSTEXPR_VAR Register ra = { Registers::ra };
 
 static MOZ_CONSTEXPR_VAR Register ScratchRegister = at;
+static MOZ_CONSTEXPR_VAR Register SecondScratchReg = t8;
 
 // Use arg reg from EnterJIT function as OsrFrameReg.
 static MOZ_CONSTEXPR_VAR Register OsrFrameReg = a3;
 static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = s3;
 static MOZ_CONSTEXPR_VAR Register CallTempReg0 = t0;
 static MOZ_CONSTEXPR_VAR Register CallTempReg1 = t1;
 static MOZ_CONSTEXPR_VAR Register CallTempReg2 = t2;
 static MOZ_CONSTEXPR_VAR Register CallTempReg3 = t3;
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/BaselineCompiler-mips.cpp
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/BaselineCompiler-mips.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerMIPS::BaselineCompilerMIPS(JSContext *cx, TempAllocator &alloc,
+                                           HandleScript script)
+  : BaselineCompilerShared(cx, alloc, script)
+{
+}
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/BaselineCompiler-mips.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_BaselineCompiler_mips_h
+#define jit_mips_BaselineCompiler_mips_h
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerMIPS : public BaselineCompilerShared
+{
+  protected:
+    BaselineCompilerMIPS(JSContext *cx, TempAllocator &alloc, HandleScript script);
+};
+
+typedef BaselineCompilerMIPS BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_BaselineCompiler_mips_h */
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/BaselineHelpers-mips.h
@@ -0,0 +1,333 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_BaselineHelpers_mips_h
+#define jit_mips_BaselineHelpers_mips_h
+
+#ifdef JS_ION
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineRegisters.h"
+#include "jit/IonMacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on
+// the stack on MIPS).
+static const size_t ICStackValueOffset = 0;
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler &masm)
+{
+    // No-op on MIPS because the ra register always holds the return address.
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler &masm)
+{
+    // No-op on MIPS because the ra register always holds the return address.
+}
+
+inline void
+EmitCallIC(CodeOffsetLabel *patchOffset, MacroAssembler &masm)
+{
+    // Move ICEntry offset into BaselineStubReg.
+    CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), BaselineStubReg);
+    *patchOffset = offset;
+
+    // Load stub pointer into BaselineStubReg.
+    masm.loadPtr(Address(BaselineStubReg, ICEntry::offsetOfFirstStub()), BaselineStubReg);
+
+    // Load stubcode pointer from BaselineStubEntry.
+    // R2 won't be active when we call ICs, so we can use it as scratch.
+    masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+    // Call the stubcode via a direct jump-and-link
+    masm.call(R2.scratchReg());
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler &masm,
+                       size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+    // This is expected to be called from within an IC, when BaselineStubReg
+    // is properly initialized to point to the stub.
+    masm.loadPtr(Address(BaselineStubReg, (uint32_t) monitorStubOffset), BaselineStubReg);
+
+    // Load stubcode pointer from BaselineStubEntry.
+    // R2 won't be active when we call ICs, so we can use it.
+    masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+    // Jump to the stubcode.
+    masm.branch(R2.scratchReg());
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler &masm)
+{
+    masm.branch(ra);
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler &masm, Register reg)
+{
+    masm.movePtr(reg, ra);
+}
+
+inline void
+EmitTailCallVM(JitCode *target, MacroAssembler &masm, uint32_t argSize)
+{
+    // We assume during this that R0 and R1 have been pushed, and that R2 is
+    // unused.
+    MOZ_ASSERT(R2 == ValueOperand(t7, t6));
+
+    // Compute frame size.
+    masm.movePtr(BaselineFrameReg, t6);
+    masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), t6);
+    masm.subPtr(BaselineStackReg, t6);
+
+    // Store frame size without VMFunction arguments for GC marking.
+    masm.ma_subu(t7, t6, Imm32(argSize));
+    masm.storePtr(t7, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+    // Push frame descriptor and perform the tail call.
+    // BaselineTailCallReg (ra) already contains the return address (as we
+    // keep it there through the stub calls), but the VMWrapper code being
+    // called expects the return address to also be pushed on the stack.
+    MOZ_ASSERT(BaselineTailCallReg == ra);
+    masm.makeFrameDescriptor(t6, IonFrame_BaselineJS);
+    masm.subPtr(Imm32(sizeof(IonCommonFrameLayout)), StackPointer);
+    masm.storePtr(t6, Address(StackPointer, IonCommonFrameLayout::offsetOfDescriptor()));
+    masm.storePtr(ra, Address(StackPointer, IonCommonFrameLayout::offsetOfReturnAddress()));
+
+    masm.branch(target);
+}
+
+inline void
+EmitCreateStubFrameDescriptor(MacroAssembler &masm, Register reg)
+{
+    // Compute stub frame size. We have to add two pointers: the stub reg and
+    // previous frame pointer pushed by EmitEnterStubFrame.
+    masm.movePtr(BaselineFrameReg, reg);
+    masm.addPtr(Imm32(sizeof(intptr_t) * 2), reg);
+    masm.subPtr(BaselineStackReg, reg);
+
+    masm.makeFrameDescriptor(reg, IonFrame_BaselineStub);
+}
+
+inline void
+EmitCallVM(JitCode *target, MacroAssembler &masm)
+{
+    EmitCreateStubFrameDescriptor(masm, t6);
+    masm.push(t6);
+    masm.call(target);
+}
+
+struct BaselineStubFrame {
+    uintptr_t savedFrame;
+    uintptr_t savedStub;
+    uintptr_t returnAddress;
+    uintptr_t descriptor;
+};
+
+static const uint32_t STUB_FRAME_SIZE = sizeof(BaselineStubFrame);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = offsetof(BaselineStubFrame, savedStub);
+
+inline void
+EmitEnterStubFrame(MacroAssembler &masm, Register scratch)
+{
+    MOZ_ASSERT(scratch != BaselineTailCallReg);
+
+    // Compute frame size.
+    masm.movePtr(BaselineFrameReg, scratch);
+    masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch);
+    masm.subPtr(BaselineStackReg, scratch);
+
+    masm.storePtr(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+    // Note: when making changes here, don't forget to update
+    // BaselineStubFrame if needed.
+
+    // Push frame descriptor and return address.
+    masm.makeFrameDescriptor(scratch, IonFrame_BaselineJS);
+    masm.subPtr(Imm32(STUB_FRAME_SIZE), StackPointer);
+    masm.storePtr(scratch, Address(StackPointer, offsetof(BaselineStubFrame, descriptor)));
+    masm.storePtr(BaselineTailCallReg, Address(StackPointer,
+                                               offsetof(BaselineStubFrame, returnAddress)));
+
+    // Save old frame pointer, stack pointer and stub reg.
+    masm.storePtr(BaselineStubReg, Address(StackPointer,
+                                           offsetof(BaselineStubFrame, savedStub)));
+    masm.storePtr(BaselineFrameReg, Address(StackPointer,
+                                            offsetof(BaselineStubFrame, savedFrame)));
+    masm.movePtr(BaselineStackReg, BaselineFrameReg);
+
+    // We pushed 4 words, so the stack is still aligned to 8 bytes.
+    masm.checkStackAlignment();
+}
+
+inline void
+EmitLeaveStubFrame(MacroAssembler &masm, bool calledIntoIon = false)
+{
+    // Ion frames do not save and restore the frame pointer. If we called
+    // into Ion, we have to restore the stack pointer from the frame descriptor.
+    // If we performed a VM call, the descriptor has been popped already so
+    // in that case we use the frame pointer.
+    if (calledIntoIon) {
+        masm.pop(ScratchRegister);
+        masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), ScratchRegister);
+        masm.addPtr(ScratchRegister, BaselineStackReg);
+    } else {
+        masm.movePtr(BaselineFrameReg, BaselineStackReg);
+    }
+
+    masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedFrame)),
+                 BaselineFrameReg);
+    masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedStub)),
+                 BaselineStubReg);
+
+    // Load the return address.
+    masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, returnAddress)),
+                 BaselineTailCallReg);
+
+    // Discard the frame descriptor.
+    masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, descriptor)), ScratchRegister);
+    masm.addPtr(Imm32(STUB_FRAME_SIZE), StackPointer);
+}
+
+inline void
+EmitStowICValues(MacroAssembler &masm, int values)
+{
+    MOZ_ASSERT(values >= 0 && values <= 2);
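+    // Values are stowed in order: R0 first, then R1. EmitUnstowICValues pops
+    // them back in the reverse order.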
+    switch (values) {
+      case 1:
+        // Stow R0
+        masm.pushValue(R0);
+        break;
+      case 2:
+        // Stow R0 and R1
+        masm.pushValue(R0);
+        masm.pushValue(R1);
+        break;
+    }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler &masm, int values, bool discard = false)
+{
+    MOZ_ASSERT(values >= 0 && values <= 2);
+    switch (values) {
+      case 1:
+        // Unstow R0.
+        if (discard)
+            masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+        else
+            masm.popValue(R0);
+        break;
+      case 2:
+        // Unstow R0 and R1.
+        if (discard) {
+            masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+        } else {
+            masm.popValue(R1);
+            masm.popValue(R0);
+        }
+        break;
+    }
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler &masm, JitCode *code, uint32_t objectOffset)
+{
+    // R0 contains the value that needs to be typechecked.
+    // The object we're updating is a boxed Value on the stack, at offset
+    // objectOffset from $sp, excluding the return address.
+
+    // Save the current BaselineStubReg and BaselineTailCallReg on the stack,
+    // since on MIPS $ra is live across the call.
+    masm.subPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
+    masm.storePtr(BaselineStubReg, Address(StackPointer, sizeof(intptr_t)));
+    masm.storePtr(BaselineTailCallReg, Address(StackPointer, 0));
+
+    // This is expected to be called from within an IC, when BaselineStubReg
+    // is properly initialized to point to the stub.
+    masm.loadPtr(Address(BaselineStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
+                 BaselineStubReg);
+
+    // Load the stubcode pointer from the update stub (now in BaselineStubReg)
+    // into R2.scratchReg().
+    masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+    // Call the stubcode.
+    masm.call(R2.scratchReg());
+
+    // Restore the old stub reg and tailcall reg.
+    masm.loadPtr(Address(StackPointer, 0), BaselineTailCallReg);
+    masm.loadPtr(Address(StackPointer, sizeof(intptr_t)), BaselineStubReg);
+    masm.addPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
+
+    // The update IC will store 0 or 1 in R1.scratchReg(), reflecting whether
+    // the value in R0 type-checked properly or not.
+    Label success;
+    masm.ma_b(R1.scratchReg(), Imm32(1), &success, Assembler::Equal, ShortJump);
+
+    // If the IC failed, then call the update fallback function.
+    EmitEnterStubFrame(masm, R1.scratchReg());
+
+    masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+    masm.pushValue(R0);
+    masm.pushValue(R1);
+    masm.push(BaselineStubReg);
+
+    // Load previous frame pointer, push BaselineFrame *.
+    masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+    masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+    EmitCallVM(code, masm);
+    EmitLeaveStubFrame(masm);
+
+    // Success at end.
+    masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler &masm, const AddrType &addr, MIRType type)
+{
+    // On MIPS, $ra is clobbered by patchableCallPreBarrier. Save it first.
+    masm.push(ra);
+    masm.patchableCallPreBarrier(addr, type);
+    masm.pop(ra);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler &masm)
+{
+    // NOTE: This routine assumes that the stub guard code left the stack in
+    // the same state it was in when it was entered.
+
+    // BaselineStubReg points to the current stub.
+
+    // Load next stub into BaselineStubReg
+    masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfNext()), BaselineStubReg);
+
+    // Load the stubcode pointer from the next stub (now in BaselineStubReg)
+    // into R2.scratchReg().
+    masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+    // Return address is already loaded, just jump to the next stubcode.
+    MOZ_ASSERT(BaselineTailCallReg == ra);
+    masm.branch(R2.scratchReg());
+}
+
+
+} // namespace jit
+} // namespace js
+
+#endif // JS_ION
+
+#endif /* jit_mips_BaselineHelpers_mips_h */
+
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/BaselineIC-mips.cpp
@@ -0,0 +1,223 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jsiter.h"
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineHelpers.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/IonLinker.h"
+
+#include "jsboolinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    // Guard that R0 is an integer and R1 is an integer.
+    Label failure;
+    Label conditionTrue;
+    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    // Compare payload regs of R0 and R1.
+    Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
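+    // ma_cmp_set writes 1 to the destination register if the condition holds
+    // and 0 otherwise, so the result can be tagged directly as a boolean.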
+    masm.ma_cmp_set(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), cond);
+
+    masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+
+    return true;
+}
+
+bool
+ICCompare_Double::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure, isNaN;
+    masm.ensureDouble(R0, FloatReg0, &failure);
+    masm.ensureDouble(R1, FloatReg1, &failure);
+
+    Register dest = R0.scratchReg();
+
+    Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
+
+    masm.ma_cmp_set_double(dest, FloatReg0, FloatReg1, doubleCond);
+
+    masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    // Guard that R0 is an integer and R1 is an integer.
+    Label failure;
+    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    // Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg.
+    Register scratchReg = R2.payloadReg();
+
+    // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
+    GeneralRegisterSet savedRegs = availableGeneralRegs(2);
+    savedRegs = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs);
+    ValueOperand savedValue = savedRegs.takeAnyValue();
+
+    Label goodMul, divTest1, divTest2;
+    switch (op_) {
+      case JSOP_ADD:
+        // We know R0.typeReg() already contains the integer tag. No boxing
+        // required.
+        masm.ma_addTestOverflow(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), &failure);
+        break;
+      case JSOP_SUB:
+        masm.ma_subTestOverflow(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), &failure);
+        break;
+      case JSOP_MUL: {
+        masm.ma_mul_branch_overflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);
+
+        masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump);
+
+        // Result is -0 if operands have different signs.
+        masm.as_xor(t8, R0.payloadReg(), R1.payloadReg());
+        masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+
+        masm.bind(&goodMul);
+        masm.move32(scratchReg, R0.payloadReg());
+        break;
+      }
+      case JSOP_DIV:
+      case JSOP_MOD: {
+        // Check for INT_MIN / -1, which results in a double.
+        masm.ma_b(R0.payloadReg(), Imm32(INT_MIN), &divTest1, Assembler::NotEqual, ShortJump);
+        masm.ma_b(R1.payloadReg(), Imm32(-1), &failure, Assembler::Equal, ShortJump);
+        masm.bind(&divTest1);
+
+        // Check for division by zero
+        masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::Equal, ShortJump);
+
+        // Check for 0 / X with X < 0 (results in -0).
+        masm.ma_b(R0.payloadReg(), Imm32(0), &divTest2, Assembler::NotEqual, ShortJump);
+        masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
+        masm.bind(&divTest2);
+
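+        // MIPS div writes the quotient to the LO register and the remainder
+        // to HI; the mflo/mfhi below read them back.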
+        masm.as_div(R0.payloadReg(), R1.payloadReg());
+
+        if (op_ == JSOP_DIV) {
+            // Result is a double if the remainder != 0.
+            masm.as_mfhi(scratchReg);
+            masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::NotEqual, ShortJump);
+            masm.as_mflo(scratchReg);
+            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+        } else {
+            Label done;
+            // If X % Y == 0 and X < 0, the result is -0.
+            masm.as_mfhi(scratchReg);
+            masm.ma_b(scratchReg, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+            masm.ma_b(R0.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
+            masm.bind(&done);
+            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+        }
+        break;
+      }
+      case JSOP_BITOR:
+        masm.ma_or(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+        break;
+      case JSOP_BITXOR:
+        masm.ma_xor(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+        break;
+      case JSOP_BITAND:
+        masm.ma_and(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+        break;
+      case JSOP_LSH:
+        // MIPS only uses the 5 lowest bits of R1 as the shift amount.
+        masm.ma_sll(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+        break;
+      case JSOP_RSH:
+        masm.ma_sra(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+        break;
+      case JSOP_URSH:
+        masm.ma_srl(scratchReg, R0.payloadReg(), R1.payloadReg());
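+        // If the unsigned result has the sign bit set it does not fit in an
+        // int32; box it as a double when allowed, otherwise fail to the next stub.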
+        if (allowDouble_) {
+            Label toUint;
+            masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump);
+
+            // Move result and box for return.
+            masm.move32(scratchReg, R0.payloadReg());
+            EmitReturnFromIC(masm);
+
+            masm.bind(&toUint);
+            masm.convertUInt32ToDouble(scratchReg, FloatReg1);
+            masm.boxDouble(FloatReg1, R0);
+        } else {
+            masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+            // Move result for return.
+            masm.move32(scratchReg, R0.payloadReg());
+        }
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Unhandled op for BinaryArith_Int32.");
+    }
+
+    EmitReturnFromIC(masm);
+
+    // Failure case - jump to next stub
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+
+    return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+    Label failure;
+    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+    switch (op) {
+      case JSOP_BITNOT:
+        masm.not32(R0.payloadReg());
+        break;
+      case JSOP_NEG:
+        // Guard against 0 and INT32_MIN, both of which result in a double.
+        masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(INT32_MAX), &failure);
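+        // (payload & INT32_MAX) is zero only for 0 and INT32_MIN, so this
+        // single test covers both cases.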
+
+        masm.neg32(R0.payloadReg());
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Unexpected op");
+        return false;
+    }
+
+    EmitReturnFromIC(masm);
+
+    masm.bind(&failure);
+    EmitStubGuardFailure(masm);
+    return true;
+}
+
+
+} // namespace jit
+} // namespace js
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/BaselineRegisters-mips.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_BaselineRegisters_mips_h
+#define jit_mips_BaselineRegisters_mips_h
+
+#ifdef JS_ION
+
+#include "jit/IonMacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static MOZ_CONSTEXPR_VAR Register BaselineFrameReg = s5;
+static MOZ_CONSTEXPR_VAR Register BaselineStackReg = sp;
+
+static MOZ_CONSTEXPR_VAR ValueOperand R0(v1, v0);
+static MOZ_CONSTEXPR_VAR ValueOperand R1(s7, s6);
+static MOZ_CONSTEXPR_VAR ValueOperand R2(t7, t6);
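+// R2 holds no live value when an IC is entered, so the baseline IC helpers
+// are free to use its registers as scratch.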
+
+// BaselineTailCallReg and BaselineStubReg
+// These use registers that are not preserved across calls.
+static MOZ_CONSTEXPR_VAR Register BaselineTailCallReg = ra;
+static MOZ_CONSTEXPR_VAR Register BaselineStubReg = t5;
+
+static MOZ_CONSTEXPR_VAR Register ExtractTemp0 = InvalidReg;
+static MOZ_CONSTEXPR_VAR Register ExtractTemp1 = InvalidReg;
+
+// Register used internally by MacroAssemblerMIPS.
+static MOZ_CONSTEXPR_VAR Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that BaselineTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber BaselineTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static MOZ_CONSTEXPR_VAR FloatRegister FloatReg0 = f0;
+static MOZ_CONSTEXPR_VAR FloatRegister FloatReg1 = f2;
+
+} // namespace jit
+} // namespace js
+
+#endif // JS_ION
+
+#endif /* jit_mips_BaselineRegisters_mips_h */
+
--- a/js/src/jit/mips/MacroAssembler-mips.cpp
+++ b/js/src/jit/mips/MacroAssembler-mips.cpp
@@ -207,28 +207,28 @@ MacroAssemblerMIPS::negateDouble(FloatRe
 {
     as_negd(reg, reg);
 }
 
 void
 MacroAssemblerMIPS::inc64(AbsoluteAddress dest)
 {
     ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
-    as_lw(secondScratchReg_, ScratchRegister, 0);
-
-    as_addiu(secondScratchReg_, secondScratchReg_, 1);
-    as_sw(secondScratchReg_, ScratchRegister, 0);
-
-    as_sltiu(secondScratchReg_, secondScratchReg_, 1);
+    as_lw(SecondScratchReg, ScratchRegister, 0);
+
+    as_addiu(SecondScratchReg, SecondScratchReg, 1);
+    as_sw(SecondScratchReg, ScratchRegister, 0);
+
+    as_sltiu(SecondScratchReg, SecondScratchReg, 1);
     as_lw(ScratchRegister, ScratchRegister, 4);
 
-    as_addu(secondScratchReg_, ScratchRegister, secondScratchReg_);
+    as_addu(SecondScratchReg, ScratchRegister, SecondScratchReg);
 
     ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
-    as_sw(secondScratchReg_, ScratchRegister, 4);
+    as_sw(SecondScratchReg, ScratchRegister, 4);
 }
 
 void
 MacroAssemblerMIPS::ma_move(Register rd, Register rs)
 {
     as_or(rd, rs, zero);
 }
 
@@ -403,17 +403,17 @@ void
 MacroAssemblerMIPS::ma_or(Register rd, Imm32 imm)
 {
     ma_or(rd, rd, imm);
 }
 
 void
 MacroAssemblerMIPS::ma_or(Register rd, Register rs, Imm32 imm)
 {
-    if (Imm16::isInSignedRange(imm.value)) {
+    if (Imm16::isInUnsignedRange(imm.value)) {
         as_ori(rd, rs, imm.value);
     } else {
         ma_li(ScratchRegister, imm);
         as_or(rd, rs, ScratchRegister);
     }
 }
 
 // xor
@@ -433,17 +433,17 @@ void
 MacroAssemblerMIPS::ma_xor(Register rd, Imm32 imm)
 {
     ma_xor(rd, rd, imm);
 }
 
 void
 MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Imm32 imm)
 {
-    if (Imm16::isInSignedRange(imm.value)) {
+    if (Imm16::isInUnsignedRange(imm.value)) {
         as_xori(rd, rs, imm.value);
     } else {
         ma_li(ScratchRegister, imm);
         as_xor(rd, rs, ScratchRegister);
     }
 }
 
 // Arithmetic-based ops.
@@ -471,48 +471,48 @@ MacroAssemblerMIPS::ma_addu(Register rd,
 {
     ma_addu(rd, rd, imm);
 }
 
 void
 MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, Label *overflow)
 {
     Label goodAddition;
-    as_addu(secondScratchReg_, rs, rt);
+    as_addu(SecondScratchReg, rs, rt);
 
     as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
     ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
 
     // If different sign, then overflow
-    as_xor(ScratchRegister, rs, secondScratchReg_);
+    as_xor(ScratchRegister, rs, SecondScratchReg);
     ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
 
     bind(&goodAddition);
-    ma_move(rd, secondScratchReg_);
+    ma_move(rd, SecondScratchReg);
 }
 
 void
 MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow)
 {
     // Check for signed range because of as_addiu
     // Check for unsigned range because of as_xori
     if (Imm16::isInSignedRange(imm.value) && Imm16::isInUnsignedRange(imm.value)) {
         Label goodAddition;
-        as_addiu(secondScratchReg_, rs, imm.value);
+        as_addiu(SecondScratchReg, rs, imm.value);
 
         // If different sign, no overflow
         as_xori(ScratchRegister, rs, imm.value);
         ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
 
         // If different sign, then overflow
-        as_xor(ScratchRegister, rs, secondScratchReg_);
+        as_xor(ScratchRegister, rs, SecondScratchReg);
         ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
 
         bind(&goodAddition);
-        ma_move(rd, secondScratchReg_);
+        ma_move(rd, SecondScratchReg);
     } else {
         ma_li(ScratchRegister, imm);
         ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
     }
 }
 
 // Subtract.
 void
@@ -539,27 +539,27 @@ MacroAssemblerMIPS::ma_subu(Register rd,
 }
 
 void
 MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label *overflow)
 {
     Label goodSubtraction;
     // Use second scratch. The instructions generated by ma_b don't use the
     // second scratch register.
-    ma_subu(secondScratchReg_, rs, rt);
+    ma_subu(SecondScratchReg, rs, rt);
 
     as_xor(ScratchRegister, rs, rt); // If same sign, no overflow
     ma_b(ScratchRegister, Imm32(0), &goodSubtraction, Assembler::GreaterThanOrEqual, ShortJump);
 
     // If different sign, then overflow
-    as_xor(ScratchRegister, rs, secondScratchReg_);
+    as_xor(ScratchRegister, rs, SecondScratchReg);
     ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
 
     bind(&goodSubtraction);
-    ma_move(rd, secondScratchReg_);
+    ma_move(rd, SecondScratchReg);
 }
 
 void
 MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow)
 {
     if (imm.value != INT32_MIN) {
         ma_addTestOverflow(rd, rs, Imm32(-imm.value), overflow);
     } else {
@@ -576,18 +576,18 @@ MacroAssemblerMIPS::ma_mult(Register rs,
 }
 
 void
 MacroAssemblerMIPS::ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label *overflow)
 {
     as_mult(rs, rt);
     as_mflo(rd);
     as_sra(ScratchRegister, rd, 31);
-    as_mfhi(secondScratchReg_);
-    ma_b(ScratchRegister, secondScratchReg_, overflow, Assembler::NotEqual);
+    as_mfhi(SecondScratchReg);
+    ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
 }
 
 void
 MacroAssemblerMIPS::ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow)
 {
     ma_li(ScratchRegister, imm);
     ma_mul_branch_overflow(rd, rs, ScratchRegister, overflow);
 }
@@ -648,26 +648,26 @@ MacroAssemblerMIPS::ma_mod_mask(Register
     bind(&negative);
     ma_li(hold, Imm32(-1));
     ma_negu(ScratchRegister, ScratchRegister);
 
     // Begin the main loop.
     bind(&head);
 
     // Extract the bottom bits into lr.
-    ma_and(secondScratchReg_, ScratchRegister, Imm32(mask));
+    ma_and(SecondScratchReg, ScratchRegister, Imm32(mask));
     // Add those bits to the accumulator.
-    as_addu(dest, dest, secondScratchReg_);
+    as_addu(dest, dest, SecondScratchReg);
     // Do a trial subtraction, this is the same operation as cmp, but we
     // store the dest
-    ma_subu(secondScratchReg_, dest, Imm32(mask));
+    ma_subu(SecondScratchReg, dest, Imm32(mask));
     // If (sum - C) > 0, store sum - C back into sum, thus performing a
     // modulus.
-    ma_b(secondScratchReg_, secondScratchReg_, &sumSigned, Signed, ShortJump);
-    ma_move(dest, secondScratchReg_);
+    ma_b(SecondScratchReg, SecondScratchReg, &sumSigned, Signed, ShortJump);
+    ma_move(dest, SecondScratchReg);
     bind(&sumSigned);
     // Get rid of the bits that we extracted before.
     as_srl(ScratchRegister, ScratchRegister, shift);
     // If the shift produced zero, finish, otherwise, continue in the loop.
     ma_b(ScratchRegister, ScratchRegister, &head, NonZero, ShortJump);
     // Check the hold to see if we need to negate the result.
     ma_b(hold, hold, &done, NotSigned, ShortJump);
 
@@ -723,18 +723,18 @@ MacroAssemblerMIPS::ma_load(const Regist
         break;
     }
 }
 
 void
 MacroAssemblerMIPS::ma_load(const Register &dest, const BaseIndex &src,
                             LoadStoreSize size, LoadStoreExtension extension)
 {
-    computeScaledAddress(src, secondScratchReg_);
-    ma_load(dest, Address(secondScratchReg_, src.offset), size, extension);
+    computeScaledAddress(src, SecondScratchReg);
+    ma_load(dest, Address(SecondScratchReg, src.offset), size, extension);
 }
 
 void
 MacroAssemblerMIPS::ma_store(const Register &data, Address address, LoadStoreSize size,
                              LoadStoreExtension extension)
 {
     int16_t encodedOffset;
     Register base;
@@ -763,34 +763,34 @@ MacroAssemblerMIPS::ma_store(const Regis
         break;
     }
 }
 
 void
 MacroAssemblerMIPS::ma_store(const Register &data, const BaseIndex &dest,
                              LoadStoreSize size, LoadStoreExtension extension)
 {
-    computeScaledAddress(dest, secondScratchReg_);
-    ma_store(data, Address(secondScratchReg_, dest.offset), size, extension);
+    computeScaledAddress(dest, SecondScratchReg);
+    ma_store(data, Address(SecondScratchReg, dest.offset), size, extension);
 }
 
 void
 MacroAssemblerMIPS::ma_store(const Imm32 &imm, const BaseIndex &dest,
                              LoadStoreSize size, LoadStoreExtension extension)
 {
-    // Make sure that secondScratchReg_ contains absolute address so that
+    // Make sure that SecondScratchReg contains absolute address so that
     // offset is 0.
-    computeEffectiveAddress(dest, secondScratchReg_);
+    computeEffectiveAddress(dest, SecondScratchReg);
 
     // Scratch register is free now, use it for loading imm value
     ma_li(ScratchRegister, imm);
 
     // with offset=0 ScratchRegister will not be used in ma_store()
     // so we can use it as a parameter here
-    ma_store(ScratchRegister, Address(secondScratchReg_, 0), size, extension);
+    ma_store(ScratchRegister, Address(SecondScratchReg, 0), size, extension);
 }
 
 void
 MacroAssemblerMIPS::computeScaledAddress(const BaseIndex &address, Register dest)
 {
     int32_t shift = Imm32::ShiftOf(address.scale).value;
     if (shift) {
         ma_sll(dest, address.index, Imm32(shift));
@@ -817,21 +817,21 @@ void
 MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address)
 {
     MOZ_ASSERT(address.base != ScratchRegister);
     ma_li(ScratchRegister, imm);
 
     if (Imm16::isInSignedRange(address.offset)) {
         as_sw(ScratchRegister, address.base, Imm16(address.offset).encode());
     } else {
-        MOZ_ASSERT(address.base != secondScratchReg_);
-
-        ma_li(secondScratchReg_, Imm32(address.offset));
-        as_addu(secondScratchReg_, address.base, secondScratchReg_);
-        as_sw(ScratchRegister, secondScratchReg_, 0);
+        MOZ_ASSERT(address.base != SecondScratchReg);
+
+        ma_li(SecondScratchReg, Imm32(address.offset));
+        as_addu(SecondScratchReg, address.base, SecondScratchReg);
+        as_sw(ScratchRegister, SecondScratchReg, 0);
     }
 }
 
 void
 MacroAssemblerMIPS::ma_pop(Register r)
 {
     as_lw(r, StackPointer, 0);
     as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
@@ -900,18 +900,18 @@ MacroAssemblerMIPS::ma_b(Register lhs, A
     MOZ_ASSERT(lhs != ScratchRegister);
     ma_lw(ScratchRegister, addr);
     ma_b(lhs, ScratchRegister, label, c, jumpKind);
 }
 
 void
 MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label *label, Condition c, JumpKind jumpKind)
 {
-    ma_lw(secondScratchReg_, addr);
-    ma_b(secondScratchReg_, imm, label, c, jumpKind);
+    ma_lw(SecondScratchReg, addr);
+    ma_b(SecondScratchReg, imm, label, c, jumpKind);
 }
 
 void
 MacroAssemblerMIPS::ma_b(Label *label, JumpKind jumpKind)
 {
     branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
 }
 
@@ -1390,18 +1390,18 @@ MacroAssemblerMIPS::ma_sd(FloatRegister 
         as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
         as_ss_Odd(ft, ScratchRegister, TAG_OFFSET);
     }
 }
 
 void
 MacroAssemblerMIPS::ma_sd(FloatRegister ft, BaseIndex address)
 {
-    computeScaledAddress(address, secondScratchReg_);
-    ma_sd(ft, Address(secondScratchReg_, address.offset));
+    computeScaledAddress(address, SecondScratchReg);
+    ma_sd(ft, Address(SecondScratchReg, address.offset));
 }
 
 void
 MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
 {
     if (Imm16::isInSignedRange(address.offset)) {
         as_ss(ft, address.base, Imm16(address.offset).encode());
     } else {
@@ -1409,18 +1409,18 @@ MacroAssemblerMIPS::ma_ss(FloatRegister 
         as_addu(ScratchRegister, address.base, ScratchRegister);
         as_ss(ft, ScratchRegister, 0);
     }
 }
 
 void
 MacroAssemblerMIPS::ma_ss(FloatRegister ft, BaseIndex address)
 {
-    computeScaledAddress(address, secondScratchReg_);
-    ma_ss(ft, Address(secondScratchReg_, address.offset));
+    computeScaledAddress(address, SecondScratchReg);
+    ma_ss(ft, Address(SecondScratchReg, address.offset));
 }
 
 void
 MacroAssemblerMIPS::ma_pop(FloatRegister fs)
 {
     ma_ld(fs, Address(StackPointer, 0));
     as_addiu(StackPointer, StackPointer, sizeof(double));
 }
@@ -1550,19 +1550,19 @@ MacroAssemblerMIPSCompat::add32(Imm32 im
 {
     ma_addu(dest, dest, imm);
 }
 
 void
 
 MacroAssemblerMIPSCompat::add32(Imm32 imm, const Address &dest)
 {
-    load32(dest, secondScratchReg_);
-    ma_addu(secondScratchReg_, imm);
-    store32(secondScratchReg_, dest);
+    load32(dest, SecondScratchReg);
+    ma_addu(SecondScratchReg, imm);
+    store32(SecondScratchReg, dest);
 }
 
 void
 MacroAssemblerMIPSCompat::sub32(Imm32 imm, Register dest)
 {
     ma_subu(dest, dest, imm);
 }
 
@@ -1581,42 +1581,48 @@ MacroAssemblerMIPSCompat::addPtr(Registe
 void
 MacroAssemblerMIPSCompat::addPtr(const Address &src, Register dest)
 {
     loadPtr(src, ScratchRegister);
     ma_addu(dest, ScratchRegister);
 }
 
 void
+MacroAssemblerMIPSCompat::subPtr(Register src, Register dest)
+{
+    ma_subu(dest, dest, src);
+}
+
+void
 MacroAssemblerMIPSCompat::not32(Register reg)
 {
     ma_not(reg, reg);
 }
 
 // Logical operations
 void
 MacroAssemblerMIPSCompat::and32(Imm32 imm, Register dest)
 {
     ma_and(dest, imm);
 }
 
 void
 MacroAssemblerMIPSCompat::and32(Imm32 imm, const Address &dest)
 {
-    load32(dest, secondScratchReg_);
-    ma_and(secondScratchReg_, imm);
-    store32(secondScratchReg_, dest);
+    load32(dest, SecondScratchReg);
+    ma_and(SecondScratchReg, imm);
+    store32(SecondScratchReg, dest);
 }
 
 void
 MacroAssemblerMIPSCompat::or32(Imm32 imm, const Address &dest)
 {
-    load32(dest, secondScratchReg_);
-    ma_or(secondScratchReg_, imm);
-    store32(secondScratchReg_, dest);
+    load32(dest, SecondScratchReg);
+    ma_or(SecondScratchReg, imm);
+    store32(SecondScratchReg, dest);
 }
 
 void
 MacroAssemblerMIPSCompat::xor32(Imm32 imm, Register dest)
 {
     ma_xor(dest, imm);
 }
 
@@ -1797,18 +1803,18 @@ void
 MacroAssemblerMIPSCompat::loadDouble(const Address &address, const FloatRegister &dest)
 {
     ma_ld(dest, address);
 }
 
 void
 MacroAssemblerMIPSCompat::loadDouble(const BaseIndex &src, const FloatRegister &dest)
 {
-    computeScaledAddress(src, secondScratchReg_);
-    ma_ld(dest, Address(secondScratchReg_, src.offset));
+    computeScaledAddress(src, SecondScratchReg);
+    ma_ld(dest, Address(SecondScratchReg, src.offset));
 }
 
 void
 MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address &address, const FloatRegister &dest)
 {
     ma_ls(dest, address);
     as_cvtds(dest, dest);
 }
@@ -1824,25 +1830,25 @@ void
 MacroAssemblerMIPSCompat::loadFloat32(const Address &address, const FloatRegister &dest)
 {
     ma_ls(dest, address);
 }
 
 void
 MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex &src, const FloatRegister &dest)
 {
-    computeScaledAddress(src, secondScratchReg_);
-    ma_ls(dest, Address(secondScratchReg_, src.offset));
+    computeScaledAddress(src, SecondScratchReg);
+    ma_ls(dest, Address(SecondScratchReg, src.offset));
 }
 
 void
 MacroAssemblerMIPSCompat::store8(const Imm32 &imm, const Address &address)
 {
-    ma_li(secondScratchReg_, imm);
-    ma_store(secondScratchReg_, address, SizeByte);
+    ma_li(SecondScratchReg, imm);
+    ma_store(SecondScratchReg, address, SizeByte);
 }
 
 void
 MacroAssemblerMIPSCompat::store8(const Register &src, const Address &address)
 {
     ma_store(src, address, SizeByte);
 }
 
@@ -1856,18 +1862,18 @@ void
 MacroAssemblerMIPSCompat::store8(const Register &src, const BaseIndex &dest)
 {
     ma_store(src, dest, SizeByte);
 }
 
 void
 MacroAssemblerMIPSCompat::store16(const Imm32 &imm, const Address &address)
 {
-    ma_li(secondScratchReg_, imm);
-    ma_store(secondScratchReg_, address, SizeHalfWord);
+    ma_li(SecondScratchReg, imm);
+    ma_store(SecondScratchReg, address, SizeHalfWord);
 }
 
 void
 MacroAssemblerMIPSCompat::store16(const Register &src, const Address &address)
 {
     ma_store(src, address, SizeHalfWord);
 }
 
@@ -1993,26 +1999,26 @@ ToType(Operand base)
 {
     return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const Address &address, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(address, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
          (cond == Equal) ? AboveOrEqual : Below);
 }
 void
 MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
          (cond == Equal) ? AboveOrEqual : Below);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, const ValueOperand &value,
                                               Label *label)
 {
     branchTestPrimitive(cond, value.typeReg(), label);
@@ -2038,26 +2044,26 @@ MacroAssemblerMIPSCompat::branchTestInt3
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const Address &address, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(address, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_INT32), label, cond);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_INT32), label, cond);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, const ValueOperand &value,
                                              Label *label)
 {
     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
     ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
@@ -2069,18 +2075,18 @@ MacroAssemblerMIPSCompat:: branchTestBoo
     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
     ma_b(tag, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestBoolean(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const ValueOperand &value, Label *label)
 {
     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
     Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
     ma_b(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR), label, actual);
@@ -2093,27 +2099,27 @@ MacroAssemblerMIPSCompat::branchTestDoub
     Condition actual = (cond == Equal) ? Below : AboveOrEqual;
     ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const Address &address, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(address, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_CLEAR), label, cond);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_CLEAR), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     Condition actual = (cond == Equal) ? Below : AboveOrEqual;
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_CLEAR), label, actual);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const ValueOperand &value, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     ma_b(value.typeReg(), ImmType(JSVAL_TYPE_NULL), label, cond);
 }
@@ -2124,18 +2130,18 @@ MacroAssemblerMIPSCompat::branchTestNull
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_NULL), label, cond);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
 }
 
 
 void
 MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const ValueOperand &value, Label *label)
 {
     branchTestObject(cond, value.typeReg(), label);
 }
@@ -2146,18 +2152,18 @@ MacroAssemblerMIPSCompat::branchTestObje
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
 }
 
 
 void
 MacroAssemblerMIPSCompat::branchTestString(Condition cond, const ValueOperand &value, Label *label)
 {
     branchTestString(cond, value.typeReg(), label);
 }
@@ -2168,18 +2174,18 @@ MacroAssemblerMIPSCompat::branchTestStri
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestString(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_STRING), label, cond);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_STRING), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const ValueOperand &value,
                                               Label *label)
 {
     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
     ma_b(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), label, cond);
@@ -2191,26 +2197,26 @@ MacroAssemblerMIPSCompat::branchTestUnde
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const Address &address, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(address, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
 }
 
 
 void
 MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, const ValueOperand &value, Label *label)
 {
     branchTestNumber(cond, value.typeReg(), label);
 }
@@ -2235,26 +2241,26 @@ MacroAssemblerMIPSCompat::branchTestMagi
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const Address &address, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(address, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const BaseIndex &src, Label *label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
-    extractTag(src, secondScratchReg_);
-    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const ValueOperand &value,
                                           const Value &v, Label *label)
 {
     moveData(v, ScratchRegister);
 
@@ -2422,52 +2428,52 @@ MacroAssemblerMIPSCompat::loadConstantFl
     ma_lis(dest, f);
 }
 
 void
 MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address &src, const FloatRegister &dest)
 {
     Label notInt32, end;
     // If it's an int, convert it to double.
-    ma_lw(secondScratchReg_, Address(src.base, src.offset + TAG_OFFSET));
-    branchTestInt32(Assembler::NotEqual, secondScratchReg_, &notInt32);
-    ma_lw(secondScratchReg_, Address(src.base, src.offset + PAYLOAD_OFFSET));
-    convertInt32ToDouble(secondScratchReg_, dest);
+    ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
+    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+    ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
+    convertInt32ToDouble(SecondScratchReg, dest);
     ma_b(&end, ShortJump);
 
     // Not an int, just load as double.
     bind(&notInt32);
     ma_ld(dest, src);
     bind(&end);
 }
 
 void
 MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
                                             const FloatRegister &dest, int32_t shift)
 {
     Label notInt32, end;
 
     // If it's an int, convert it to double.
 
-    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
     // Since we only have one scratch, we need to stomp over it with the tag.
-    load32(Address(secondScratchReg_, TAG_OFFSET), secondScratchReg_);
-    branchTestInt32(Assembler::NotEqual, secondScratchReg_, &notInt32);
-
-    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
-    load32(Address(secondScratchReg_, PAYLOAD_OFFSET), secondScratchReg_);
-    convertInt32ToDouble(secondScratchReg_, dest);
+    load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
+    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+    load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
+    convertInt32ToDouble(SecondScratchReg, dest);
     ma_b(&end, ShortJump);
 
     // Not an int, just load as double.
     bind(&notInt32);
     // First, recompute the offset that had been stored in the scratch register
     // since the scratch register was overwritten loading in the type.
-    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
-    loadDouble(Address(secondScratchReg_, 0), dest);
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+    loadDouble(Address(SecondScratchReg, 0), dest);
     bind(&end);
 }
 
 void
 MacroAssemblerMIPSCompat::loadConstantDouble(double dp, const FloatRegister &dest)
 {
     ma_lid(dest, dp);
 }
@@ -2479,20 +2485,20 @@ MacroAssemblerMIPSCompat::branchTestInt3
     ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestStringTruthy(bool b, const ValueOperand &value, Label *label)
 {
     Register string = value.payloadReg();
     size_t mask = (0xFFFFFFFF << JSString::LENGTH_SHIFT);
-    ma_lw(secondScratchReg_, Address(string, JSString::offsetOfLengthAndFlags()));
-
-    // Use secondScratchReg_ because ma_and will clobber ScratchRegister
-    ma_and(ScratchRegister, secondScratchReg_, Imm32(mask));
+    ma_lw(SecondScratchReg, Address(string, JSString::offsetOfLengthAndFlags()));
+
+    // Use SecondScratchReg because ma_and will clobber ScratchRegister
+    ma_and(ScratchRegister, SecondScratchReg, Imm32(mask));
     ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
 }
 
 void
 MacroAssemblerMIPSCompat::branchTestDoubleTruthy(bool b, const FloatRegister &value, Label *label)
 {
     ma_lid(ScratchFloatReg, 0.0);
     DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
@@ -2582,84 +2588,84 @@ void
 MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst)
 {
     storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
 }
 
 void
 MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const BaseIndex &dest)
 {
-    computeScaledAddress(dest, secondScratchReg_);
-    storeValue(val, Address(secondScratchReg_, dest.offset));
+    computeScaledAddress(dest, SecondScratchReg);
+    storeValue(val, Address(SecondScratchReg, dest.offset));
 }
 
 void
 MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex dest)
 {
     computeScaledAddress(dest, ScratchRegister);
 
     // Make sure that ma_sw doesn't clobber ScratchRegister
     int32_t offset = dest.offset;
     if (!Imm16::isInSignedRange(offset)) {
-        ma_li(secondScratchReg_, Imm32(offset));
-        as_addu(ScratchRegister, ScratchRegister, secondScratchReg_);
+        ma_li(SecondScratchReg, Imm32(offset));
+        as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
         offset = 0;
     }
 
     storeValue(type, reg, Address(ScratchRegister, offset));
 }
 
 void
 MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const Address &dest)
 {
     ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
     ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
 }
 
 void
 MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, Address dest)
 {
-    MOZ_ASSERT(dest.base != secondScratchReg_);
+    MOZ_ASSERT(dest.base != SecondScratchReg);
 
     ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
-    ma_li(secondScratchReg_, ImmTag(JSVAL_TYPE_TO_TAG(type)));
-    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + TAG_OFFSET));
+    ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
 }
 
 void
 MacroAssemblerMIPSCompat::storeValue(const Value &val, Address dest)
 {
-    MOZ_ASSERT(dest.base != secondScratchReg_);
-
-    ma_li(secondScratchReg_, Imm32(getType(val)));
-    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + TAG_OFFSET));
-    moveData(val, secondScratchReg_);
-    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+    MOZ_ASSERT(dest.base != SecondScratchReg);
+
+    ma_li(SecondScratchReg, Imm32(getType(val)));
+    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+    moveData(val, SecondScratchReg);
+    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
 }
 
 void
 MacroAssemblerMIPSCompat::storeValue(const Value &val, BaseIndex dest)
 {
     computeScaledAddress(dest, ScratchRegister);
 
     // Make sure that ma_sw doesn't clobber ScratchRegister
     int32_t offset = dest.offset;
     if (!Imm16::isInSignedRange(offset)) {
-        ma_li(secondScratchReg_, Imm32(offset));
-        as_addu(ScratchRegister, ScratchRegister, secondScratchReg_);
+        ma_li(SecondScratchReg, Imm32(offset));
+        as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
         offset = 0;
     }
     storeValue(val, Address(ScratchRegister, offset));
 }
 
 void
 MacroAssemblerMIPSCompat::loadValue(const BaseIndex &addr, ValueOperand val)
 {
-    computeScaledAddress(addr, secondScratchReg_);
-    loadValue(Address(secondScratchReg_, addr.offset), val);
+    computeScaledAddress(addr, SecondScratchReg);
+    loadValue(Address(SecondScratchReg, addr.offset), val);
 }
 
 void
 MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val)
 {
     // Ensure that loading the payload does not erase the pointer to the
     // Value in memory.
     if (src.base != val.payloadReg()) {
@@ -2709,58 +2715,58 @@ MacroAssemblerMIPSCompat::popValue(Value
     as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
     // Free stack.
     as_addiu(StackPointer, StackPointer, sizeof(Value));
 }
 
 void
 MacroAssemblerMIPSCompat::storePayload(const Value &val, Address dest)
 {
-    moveData(val, secondScratchReg_);
-    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+    moveData(val, SecondScratchReg);
+    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
 }
 
 void
 MacroAssemblerMIPSCompat::storePayload(Register src, Address dest)
 {
     ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
     return;
 }
 
 void
 MacroAssemblerMIPSCompat::storePayload(const Value &val, Register base, Register index,
                                        int32_t shift)
 {
-    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
 
     moveData(val, ScratchRegister);
 
-    as_sw(ScratchRegister, secondScratchReg_, NUNBOX32_PAYLOAD_OFFSET);
+    as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
 }
 
 void
 MacroAssemblerMIPSCompat::storePayload(Register src, Register base, Register index, int32_t shift)
 {
-    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
-    as_sw(src, secondScratchReg_, NUNBOX32_PAYLOAD_OFFSET);
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+    as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
 }
 
 void
 MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest)
 {
-    ma_li(secondScratchReg_, tag);
-    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + TAG_OFFSET));
+    ma_li(SecondScratchReg, tag);
+    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
 }
 
 void
 MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift)
 {
-    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
     ma_li(ScratchRegister, tag);
-    as_sw(ScratchRegister, secondScratchReg_, TAG_OFFSET);
+    as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
 }
 
 void
 MacroAssemblerMIPSCompat::linkExitFrame()
 {
     uint8_t *dest = (uint8_t*)GetIonContext()->runtime->addressOfIonTop();
     movePtr(ImmPtr(dest), ScratchRegister);
     ma_sw(StackPointer, Address(ScratchRegister, 0));
--- a/js/src/jit/mips/MacroAssembler-mips.h
+++ b/js/src/jit/mips/MacroAssembler-mips.h
@@ -60,26 +60,17 @@ static const ValueOperand JSReturnOperan
 static const ValueOperand softfpReturnOperand = ValueOperand(v1, v0);
 
 static Register CallReg = t9;
 static const int defaultShift = 3;
 static_assert(1 << defaultShift == sizeof(jsval), "The defaultShift is wrong");
 
 class MacroAssemblerMIPS : public Assembler
 {
-  protected:
-    Register secondScratchReg_;
-
   public:
-    MacroAssemblerMIPS() : secondScratchReg_(t8)
-    { }
-
-    Register  secondScratch() {
-        return secondScratchReg_;
-    }
 
     void convertBoolToInt32(Register source, Register dest);
     void convertInt32ToDouble(const Register &src, const FloatRegister &dest);
     void convertInt32ToDouble(const Address &src, FloatRegister dest);
     void convertUInt32ToDouble(const Register &src, const FloatRegister &dest);
     void convertUInt32ToFloat32(const Register &src, const FloatRegister &dest);
     void convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest);
     void branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail);
@@ -571,18 +562,18 @@ class MacroAssemblerMIPSCompat : public 
             branch32(cond, lhs.toAddress(), rhs, label);
         }
     }
     void branch32(Condition cond, const Address &lhs, Register rhs, Label *label) {
         ma_lw(ScratchRegister, lhs);
         ma_b(ScratchRegister, rhs, label, cond);
     }
     void branch32(Condition cond, const Address &lhs, Imm32 rhs, Label *label) {
-        ma_lw(secondScratchReg_, lhs);
-        ma_b(secondScratchReg_, rhs, label, cond);
+        ma_lw(SecondScratchReg, lhs);
+        ma_b(SecondScratchReg, rhs, label, cond);
     }
     void branchPtr(Condition cond, const Address &lhs, Register rhs, Label *label) {
         branch32(cond, lhs, rhs, label);
     }
 
     void branchPrivatePtr(Condition cond, const Address &lhs, ImmPtr ptr, Label *label) {
         branchPtr(cond, lhs, ptr, label);
     }
@@ -653,18 +644,18 @@ class MacroAssemblerMIPSCompat : public 
             ma_b(ScratchRegister, ScratchRegister, label, cond);
         }
     }
     void branchTest32(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
         ma_li(ScratchRegister, imm);
         branchTest32(cond, lhs, ScratchRegister, label);
     }
     void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
-        ma_lw(secondScratchReg_, address);
-        branchTest32(cond, secondScratchReg_, imm, label);
+        ma_lw(SecondScratchReg, address);
+        branchTest32(cond, SecondScratchReg, imm, label);
     }
     void branchTestPtr(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
         branchTest32(cond, lhs, rhs, label);
     }
     void branchTestPtr(Condition cond, const Register &lhs, const Imm32 rhs, Label *label) {
         branchTest32(cond, lhs, rhs, label);
     }
     void branchTestPtr(Condition cond, const Address &lhs, Imm32 imm, Label *label) {
@@ -707,48 +698,48 @@ public:
         ma_b(reg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
         CodeOffsetJump off = jumpWithPatch(label);
         bind(&skipJump);
         return off;
     }
 
     template <typename T>
     CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel *label) {
-        loadPtr(addr, secondScratchReg_);
+        loadPtr(addr, SecondScratchReg);
         movePtr(ptr, ScratchRegister);
         Label skipJump;
-        ma_b(secondScratchReg_, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+        ma_b(SecondScratchReg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
         CodeOffsetJump off = jumpWithPatch(label);
         bind(&skipJump);
         return off;
     }
     void branchPtr(Condition cond, Address addr, ImmGCPtr ptr, Label *label) {
-        ma_lw(secondScratchReg_, addr);
+        ma_lw(SecondScratchReg, addr);
         ma_li(ScratchRegister, ptr);
-        ma_b(secondScratchReg_, ScratchRegister, label, cond);
+        ma_b(SecondScratchReg, ScratchRegister, label, cond);
     }
     void branchPtr(Condition cond, Address addr, ImmWord ptr, Label *label) {
-        ma_lw(secondScratchReg_, addr);
-        ma_b(secondScratchReg_, Imm32(ptr.value), label, cond);
+        ma_lw(SecondScratchReg, addr);
+        ma_b(SecondScratchReg, Imm32(ptr.value), label, cond);
     }
     void branchPtr(Condition cond, Address addr, ImmPtr ptr, Label *label) {
         branchPtr(cond, addr, ImmWord(uintptr_t(ptr.value)), label);
     }
     void branchPtr(Condition cond, const AbsoluteAddress &addr, const Register &ptr, Label *label) {
         loadPtr(addr, ScratchRegister);
         ma_b(ScratchRegister, ptr, label, cond);
     }
     void branchPtr(Condition cond, const AsmJSAbsoluteAddress &addr, const Register &ptr,
                    Label *label) {
         loadPtr(addr, ScratchRegister);
         ma_b(ScratchRegister, ptr, label, cond);
     }
     void branch32(Condition cond, const AbsoluteAddress &lhs, Imm32 rhs, Label *label) {
-        loadPtr(lhs, secondScratchReg_); // ma_b might use scratch
-        ma_b(secondScratchReg_, rhs, label, cond);
+        loadPtr(lhs, SecondScratchReg); // ma_b might use scratch
+        ma_b(SecondScratchReg, rhs, label, cond);
     }
     void branch32(Condition cond, const AbsoluteAddress &lhs, const Register &rhs, Label *label) {
         loadPtr(lhs, ScratchRegister);
         ma_b(ScratchRegister, rhs, label, cond);
     }
 
     void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
@@ -912,16 +903,17 @@ public:
     void xor32(Imm32 imm, Register dest);
     void xorPtr(Imm32 imm, Register dest);
     void xorPtr(Register src, Register dest);
     void orPtr(Imm32 imm, Register dest);
     void orPtr(Register src, Register dest);
     void andPtr(Imm32 imm, Register dest);
     void andPtr(Register src, Register dest);
     void addPtr(Register src, Register dest);
+    void subPtr(Register src, Register dest);
     void addPtr(const Address &src, Register dest);
     void not32(Register reg);
 
     void move32(const Imm32 &imm, const Register &dest);
     void move32(const Register &src, const Register &dest);
 
     void movePtr(const Register &src, const Register &dest);
     void movePtr(const ImmWord &imm, const Register &dest);
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/MoveEmitter-mips.cpp
@@ -0,0 +1,330 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/MoveEmitter-mips.h"
+
+using namespace js;
+using namespace js::jit;
+
+MoveEmitterMIPS::MoveEmitterMIPS(MacroAssemblerMIPSCompat &masm)
+  : inCycle_(false),
+    masm(masm),
+    pushedAtCycle_(-1),
+    pushedAtSpill_(-1),
+    spilledReg_(InvalidReg),
+    spilledFloatReg_(InvalidFloatReg)
+{
+    pushedAtStart_ = masm.framePushed();
+}
+
+void
+MoveEmitterMIPS::emit(const MoveResolver &moves)
+{
+    if (moves.hasCycles()) {
+        // Reserve stack space for cycle resolution.
+        masm.reserveStack(sizeof(double));
+        pushedAtCycle_ = masm.framePushed();
+    }
+
+    for (size_t i = 0; i < moves.numMoves(); i++)
+        emit(moves.getMove(i));
+}
+
+MoveEmitterMIPS::~MoveEmitterMIPS()
+{
+    assertDone();
+}
+
+Address
+MoveEmitterMIPS::cycleSlot() const
+{
+    int offset = masm.framePushed() - pushedAtCycle_;
+    MOZ_ASSERT(Imm16::isInSignedRange(offset));
+    return Address(StackPointer, offset);
+}
+
+int32_t
+MoveEmitterMIPS::getAdjustedOffset(const MoveOperand &operand)
+{
+    MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+    if (operand.base() != StackPointer)
+        return operand.disp();
+
+    // Adjust offset if stack pointer has been moved.
+    return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
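
The adjustment above only matters for sp-relative operands: a displacement recorded against the stack pointer before the emitter pushed anything must grow by whatever has been pushed since. A minimal standalone sketch of that arithmetic (plain C++, not jit code; framePushed and pushedAtStart stand in for masm.framePushed() and pushedAtStart_):

    #include <cassert>
    #include <cstdint>

    int32_t adjustedOffset(int32_t disp, uint32_t framePushed, uint32_t pushedAtStart)
    {
        // Shift an sp-relative displacement down by the stack growth since the
        // emitter started, mirroring getAdjustedOffset() above.
        return disp + int32_t(framePushed - pushedAtStart);
    }

    int main()
    {
        // 16 bytes pushed since the emitter started: sp+8 is now at sp+24.
        assert(adjustedOffset(8, 48, 32) == 24);
        return 0;
    }
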
+
+Address
+MoveEmitterMIPS::getAdjustedAddress(const MoveOperand &operand)
+{
+    return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+
+Register
+MoveEmitterMIPS::tempReg()
+{
+    spilledReg_ = SecondScratchReg;
+    return SecondScratchReg;
+}
+
+void
+MoveEmitterMIPS::breakCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type)
+{
+    // A cycle has the following pattern:
+    //   (A -> B)
+    //   (B -> A)
+    //
+    // This case handles (A -> B), which we reach first. We save B, then allow
+    // the original move to continue.
+    switch (type) {
+      case MoveOp::FLOAT32:
+        if (to.isMemory()) {
+            FloatRegister temp = ScratchFloatReg;
+            masm.loadFloat32(getAdjustedAddress(to), temp);
+            masm.storeFloat32(temp, cycleSlot());
+        } else {
+            masm.storeFloat32(to.floatReg(), cycleSlot());
+        }
+        break;
+      case MoveOp::DOUBLE:
+        if (to.isMemory()) {
+            FloatRegister temp = ScratchFloatReg;
+            masm.loadDouble(getAdjustedAddress(to), temp);
+            masm.storeDouble(temp, cycleSlot());
+        } else {
+            masm.storeDouble(to.floatReg(), cycleSlot());
+        }
+        break;
+      case MoveOp::INT32:
+        MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+      case MoveOp::GENERAL:
+        if (to.isMemory()) {
+            Register temp = tempReg();
+            masm.loadPtr(getAdjustedAddress(to), temp);
+            masm.storePtr(temp, cycleSlot());
+        } else {
+            // Second scratch register should not be moved by MoveEmitter.
+            MOZ_ASSERT(to.reg() != spilledReg_);
+            masm.storePtr(to.reg(), cycleSlot());
+        }
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Unexpected move type");
+    }
+}
+
+void
+MoveEmitterMIPS::completeCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type)
+{
+    // A cycle has the following pattern:
+    //   (A -> B)
+    //   (B -> A)
+    //
+    // This case handles (B -> A), which we reach last. We emit a move from the
+    // saved value of B, to A.
+    switch (type) {
+      case MoveOp::FLOAT32:
+        if (to.isMemory()) {
+            FloatRegister temp = ScratchFloatReg;
+            masm.loadFloat32(cycleSlot(), temp);
+            masm.storeFloat32(temp, getAdjustedAddress(to));
+        } else {
+            masm.loadFloat32(cycleSlot(), to.floatReg());
+        }
+        break;
+      case MoveOp::DOUBLE:
+        if (to.isMemory()) {
+            FloatRegister temp = ScratchFloatReg;
+            masm.loadDouble(cycleSlot(), temp);
+            masm.storeDouble(temp, getAdjustedAddress(to));
+        } else {
+            masm.loadDouble(cycleSlot(), to.floatReg());
+        }
+        break;
+      case MoveOp::INT32:
+        MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+      case MoveOp::GENERAL:
+        if (to.isMemory()) {
+            Register temp = tempReg();
+            masm.loadPtr(cycleSlot(), temp);
+            masm.storePtr(temp, getAdjustedAddress(to));
+        } else {
+            // Second scratch register should not be moved by MoveEmitter.
+            MOZ_ASSERT(to.reg() != spilledReg_);
+            masm.loadPtr(cycleSlot(), to.reg());
+        }
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Unexpected move type");
+    }
+}
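
Together, breakCycle() and completeCycle() resolve a move cycle by parking one value in the cycle slot while the remaining moves run. For the two-move cycle (A -> B), (B -> A) the emitted order is: save B into the cycle slot, perform A -> B as an ordinary move, then restore the saved value into A. A standalone sketch of that ordering (plain C++, not jit code):

    #include <cassert>

    int main()
    {
        int A = 1, B = 2, cycleSlot = 0;

        cycleSlot = B;  // breakCycle: save the destination of the first move
        B = A;          // ordinary emitMove() for (A -> B)
        A = cycleSlot;  // completeCycle: finish (B -> A) from the saved copy

        assert(A == 2 && B == 1);
        return 0;
    }
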
+
+void
+MoveEmitterMIPS::emitMove(const MoveOperand &from, const MoveOperand &to)
+{
+    if (from.isGeneralReg()) {
+        // Second scratch register should not be moved by MoveEmitter.
+        MOZ_ASSERT(from.reg() != spilledReg_);
+
+        if (to.isGeneralReg())
+            masm.movePtr(from.reg(), to.reg());
+        else if (to.isMemory())
+            masm.storePtr(from.reg(), getAdjustedAddress(to));
+        else
+            MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
+    } else if (from.isMemory()) {
+        if (to.isGeneralReg()) {
+            masm.loadPtr(getAdjustedAddress(from), to.reg());
+        } else if (to.isMemory()) {
+            masm.loadPtr(getAdjustedAddress(from), tempReg());
+            masm.storePtr(tempReg(), getAdjustedAddress(to));
+        } else {
+            MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
+        }
+    } else if (from.isEffectiveAddress()) {
+        if (to.isGeneralReg()) {
+            masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+        } else if (to.isMemory()) {
+            masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+            masm.storePtr(tempReg(), getAdjustedAddress(to));
+        } else {
+            MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
+        }
+    } else {
+        MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
+    }
+}
+
+void
+MoveEmitterMIPS::emitFloat32Move(const MoveOperand &from, const MoveOperand &to)
+{
+    // Ensure that we can use ScratchFloatReg in memory move.
+    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloatReg);
+    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloatReg);
+
+    if (from.isFloatReg()) {
+        if (to.isFloatReg()) {
+            masm.moveFloat32(from.floatReg(), to.floatReg());
+        } else if (to.isGeneralReg()) {
+            // This should only be used when passing a float parameter in a1, a2 or a3.
+            MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+            masm.as_mfc1(to.reg(), from.floatReg());
+        } else {
+            MOZ_ASSERT(to.isMemory());
+            masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+        }
+    } else if (to.isFloatReg()) {
+        MOZ_ASSERT(from.isMemory());
+        masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+    } else if (to.isGeneralReg()) {
+        MOZ_ASSERT(from.isMemory());
+        // This should only be used when passing a float parameter in a1, a2 or a3.
+        MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+        masm.loadPtr(getAdjustedAddress(from), to.reg());
+    } else {
+        MOZ_ASSERT(from.isMemory());
+        MOZ_ASSERT(to.isMemory());
+        masm.loadFloat32(getAdjustedAddress(from), ScratchFloatReg);
+        masm.storeFloat32(ScratchFloatReg, getAdjustedAddress(to));
+    }
+}
+
+void
+MoveEmitterMIPS::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
+{
+    // Ensure that we can use ScratchFloatReg in memory move.
+    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloatReg);
+    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloatReg);
+
+    if (from.isFloatReg()) {
+        if (to.isFloatReg()) {
+            masm.moveDouble(from.floatReg(), to.floatReg());
+        } else if (to.isGeneralReg()) {
+            // Used when passing a double parameter in the a2/a3 register pair.
+            // MacroAssemblerMIPSCompat::passABIArg adds two moves for one
+            // double parameter.
+            if (to.reg() == a2)
+                masm.as_mfc1(a2, from.floatReg());
+            else if (to.reg() == a3)
+                masm.as_mfc1_Odd(a3, from.floatReg());
+            else
+                MOZ_ASSUME_UNREACHABLE("Invalid emitDoubleMove arguments.");
+        } else {
+            MOZ_ASSERT(to.isMemory());
+            masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+        }
+    } else if (to.isFloatReg()) {
+        MOZ_ASSERT(from.isMemory());
+        masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+    } else if (to.isGeneralReg()) {
+        MOZ_ASSERT(from.isMemory());
+        // Used when passing a double parameter in the a2/a3 register pair.
+        // MacroAssemblerMIPSCompat::passABIArg adds two moves for one
+        // double parameter.
+        if (to.reg() == a2)
+            masm.loadPtr(getAdjustedAddress(from), a2);
+        else if (to.reg() == a3)
+            masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
+        else
+            MOZ_ASSUME_UNREACHABLE("Invalid emitDoubleMove arguments.");
+    } else {
+        MOZ_ASSERT(from.isMemory());
+        MOZ_ASSERT(to.isMemory());
+        masm.loadDouble(getAdjustedAddress(from), ScratchFloatReg);
+        masm.storeDouble(ScratchFloatReg, getAdjustedAddress(to));
+    }
+}
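
When a double argument is destined for the a2/a3 pair under the MIPS o32 ABI, the 64-bit value is handed over as two 32-bit words: the register case above moves the two halves with as_mfc1/as_mfc1_Odd, and the memory case loads a3 from offset + sizeof(uint32_t). A standalone sketch of the word split (plain C++; the low-word-first layout assumes a little-endian target):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main()
    {
        double d = 1.5;
        uint32_t words[2];
        std::memcpy(words, &d, sizeof(d));  // reinterpret the double as two words

        // On a little-endian o32 target words[0] would travel in a2 and
        // words[1] in a3, matching the +sizeof(uint32_t) load above.
        std::printf("a2=0x%08x a3=0x%08x\n", words[0], words[1]);
        return 0;
    }
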
+
+void
+MoveEmitterMIPS::emit(const MoveOp &move)
+{
+    const MoveOperand &from = move.from();
+    const MoveOperand &to = move.to();
+
+    if (move.isCycleEnd()) {
+        MOZ_ASSERT(inCycle_);
+        completeCycle(from, to, move.type());
+        inCycle_ = false;
+        return;
+    }
+
+    if (move.isCycleBegin()) {
+        MOZ_ASSERT(!inCycle_);
+        breakCycle(from, to, move.endCycleType());
+        inCycle_ = true;
+    }
+
+    switch (move.type()) {
+      case MoveOp::FLOAT32:
+        emitFloat32Move(from, to);
+        break;
+      case MoveOp::DOUBLE:
+        emitDoubleMove(from, to);
+        break;
+      case MoveOp::INT32:
+        MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+      case MoveOp::GENERAL:
+        emitMove(from, to);
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Unexpected move type");
+    }
+}
+
+void
+MoveEmitterMIPS::assertDone()
+{
+    MOZ_ASSERT(!inCycle_);
+}
+
+void
+MoveEmitterMIPS::finish()
+{
+    assertDone();
+
+    masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/MoveEmitter-mips.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_MoveEmitter_mips_h
+#define jit_mips_MoveEmitter_mips_h
+
+#include "jit/IonMacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class CodeGenerator;
+
+class MoveEmitterMIPS
+{
+    bool inCycle_;
+    MacroAssemblerMIPSCompat &masm;
+
+    // Original stack push value.
+    uint32_t pushedAtStart_;
+
+    // These store stack offsets to spill locations, snapshotting
+    // codegen->framePushed_ at the time they were allocated. They are -1 if no
+    // stack space has been allocated for that particular spill.
+    int32_t pushedAtCycle_;
+    int32_t pushedAtSpill_;
+
+    // These are registers that are available for temporary use. They may be
+    // assigned InvalidReg. If no corresponding spill space has been assigned,
+    // then these registers do not need to be spilled.
+    Register spilledReg_;
+    FloatRegister spilledFloatReg_;
+
+    void assertDone();
+    Register tempReg();
+    FloatRegister tempFloatReg();
+    Address cycleSlot() const;
+    int32_t getAdjustedOffset(const MoveOperand &operand);
+    Address getAdjustedAddress(const MoveOperand &operand);
+
+    void emitMove(const MoveOperand &from, const MoveOperand &to);
+    void emitFloat32Move(const MoveOperand &from, const MoveOperand &to);
+    void emitDoubleMove(const MoveOperand &from, const MoveOperand &to);
+    void breakCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type);
+    void completeCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type);
+    void emit(const MoveOp &move);
+
+  public:
+    MoveEmitterMIPS(MacroAssemblerMIPSCompat &masm);
+    ~MoveEmitterMIPS();
+    void emit(const MoveResolver &moves);
+    void finish();
+};
+
+typedef MoveEmitterMIPS MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_MoveEmitter_mips_h */
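
A hedged sketch of how a code generator might drive this class, modeled on the way other backends pair a MoveResolver with their MoveEmitter; the wrapper function and its error handling are illustrative, not part of this patch:

    #include "jit/MoveResolver.h"
    #include "jit/mips/MoveEmitter-mips.h"

    static bool
    emitParallelMoves(js::jit::MacroAssemblerMIPSCompat &masm,
                      js::jit::MoveResolver &resolver)
    {
        // Order the pending moves (detecting cycles), then let the emitter
        // turn them into MIPS code and release any stack it reserved.
        if (!resolver.resolve())
            return false;

        js::jit::MoveEmitterMIPS emitter(masm);
        emitter.emit(resolver);
        emitter.finish();
        return true;
    }
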
--- a/js/src/jsapi-tests/moz.build
+++ b/js/src/jsapi-tests/moz.build
@@ -40,17 +40,16 @@ UNIFIED_SOURCES += [
     'testHashTableInit.cpp',
     'testIndexToString.cpp',
     'testIntern.cpp',
     'testIntString.cpp',
     'testIntTypesABI.cpp',
     'testJSEvaluateScript.cpp',
     'testLookup.cpp',
     'testLooselyEqual.cpp',
-    'testMappedArrayBuffer.cpp',
     'testNewObject.cpp',
     'testNullRoot.cpp',
     'testObjectEmulatingUndefined.cpp',
     'testOOM.cpp',
     'testOps.cpp',
     'testOriginPrincipals.cpp',
     'testParseJSON.cpp',
     'testPersistentRooted.cpp',
deleted file mode 100644
--- a/js/src/jsapi-tests/testMappedArrayBuffer.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- */
-
-#ifdef XP_UNIX
-#include <fcntl.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "jsfriendapi.h"
-#include "js/StructuredClone.h"
-#include "jsapi-tests/tests.h"
-#include "vm/ArrayBufferObject.h"
-
-const char test_data[] = "1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
-const char test_filename[] = "temp-bug945152_MappedArrayBuffer";
-
-BEGIN_TEST(testMappedArrayBuffer_bug945152)
-{
-    TempFile test_file;
-    FILE *test_stream = test_file.open(test_filename);
-    CHECK(fputs(test_data, test_stream) != EOF);
-    test_file.close();
-
-    // Offset 0.
-    CHECK(TestCreateObject(0, 12));
-
-    // Aligned offset.
-    CHECK(TestCreateObject(8, 12));
-
-    // Unaligned offset.
-    CHECK(CreateNewObject(11, 12) == nullptr);
-
-    // Offset + length greater than file size.
-    CHECK(CreateNewObject(8, sizeof(test_data) - 7) == nullptr);
-
-    // Release the mapped content.
-    CHECK(TestReleaseContents());
-
-#ifdef JSGC_USE_EXACT_ROOTING
-    // Ensure that fd is closed after object been GCed.
-    // Check the fd returned from object created in a function,
-    // then do the GC, in order to guarantee the object is freed when
-    // exact rooting is not on.
-    int fd = GetNewObjectFD();
-    GC(cx);
-    CHECK(!fd_is_valid(fd));
-#endif
-
-    // Neuter mapped array buffer.
-    CHECK(TestNeuterObject());
-
-    // Clone mapped array buffer.
-    CHECK(TestCloneObject());
-
-    test_file.remove();
-
-    return true;
-}
-
-JSObject *CreateNewObject(const int offset, const int length)
-{
-    int fd = open(test_filename, O_RDONLY);
-    void *ptr;
-    int new_fd;
-    if (!JS_CreateMappedArrayBufferContents(fd, &new_fd, offset, length, &ptr))
-        return nullptr;
-    JSObject *obj = JS_NewArrayBufferWithContents(cx, ptr);
-    close(fd);
-
-    return obj;
-}
-
-// Return the fd from object created in the stack.
-int GetNewObjectFD()
-{
-    JS::RootedObject obj(cx, CreateNewObject(0, 12));
-    int fd = getFD(obj);
-    CHECK(fd_is_valid(fd));
-
-    return fd;
-}
-
-bool VerifyObject(JS::HandleObject obj, const int offset, const int length)
-{
-    CHECK(obj != nullptr);
-    CHECK(JS_IsArrayBufferObject(obj));
-    CHECK_EQUAL(JS_GetArrayBufferByteLength(obj), length);
-    js::ArrayBufferObject *buf = &obj->as<js::ArrayBufferObject>();
-    CHECK(buf->isMappedArrayBuffer());
-    const char *data = reinterpret_cast<const char *>(JS_GetArrayBufferData(obj));
-    CHECK(data != nullptr);
-    CHECK(memcmp(data, test_data + offset, length) == 0);
-
-    return true;
-}
-
-bool TestCreateObject(const int offset, const int length)
-{
-    JS::RootedObject obj(cx, CreateNewObject(offset, length));
-    CHECK(VerifyObject(obj, offset, length));
-
-    return true;
-}
-
-bool TestReleaseContents()
-{
-    int fd = open(test_filename, O_RDONLY);
-    void *ptr;
-    int new_fd;
-    if (!JS_CreateMappedArrayBufferContents(fd, &new_fd, 0, 12, &ptr))
-        return false;
-    CHECK(fd_is_valid(new_fd));
-    JS_ReleaseMappedArrayBufferContents(new_fd, ptr, 12);
-    CHECK(!fd_is_valid(new_fd));
-    close(fd);
-
-    return true;
-}
-
-bool TestNeuterObject()
-{
-    JS::RootedObject obj(cx, CreateNewObject(8, 12));
-    CHECK(obj != nullptr);
-    int fd = getFD(obj);
-    CHECK(fd_is_valid(fd));
-    JS_NeuterArrayBuffer(cx, obj);
-    CHECK(isNeutered(obj));
-    CHECK(!fd_is_valid(fd));
-
-    return true;
-}
-
-bool TestCloneObject()
-{
-    JS::RootedObject obj1(cx, CreateNewObject(8, 12));
-    CHECK(obj1 != nullptr);
-    JSAutoStructuredCloneBuffer cloned_buffer;
-    JS::RootedValue v1(cx, OBJECT_TO_JSVAL(obj1));
-    const JSStructuredCloneCallbacks *callbacks = js::GetContextStructuredCloneCallbacks(cx);
-    CHECK(cloned_buffer.write(cx, v1, callbacks, nullptr));
-    JS::RootedValue v2(cx);
-    CHECK(cloned_buffer.read(cx, &v2, callbacks, nullptr));
-    JS::RootedObject obj2(cx, JSVAL_TO_OBJECT(v2));
-    CHECK(VerifyObject(obj2, 8, 12));
-
-    return true;
-}
-
-bool isNeutered(JS::HandleObject obj)
-{
-    JS::RootedValue v(cx);
-    return JS_GetProperty(cx, obj, "byteLength", &v) && v.toInt32() == 0;
-}
-
-int getFD(JS::HandleObject obj)
-{
-    CHECK(obj != nullptr);
-    js::ArrayBufferObject *buf = &obj->as<js::ArrayBufferObject>();
-    return buf->getMappingFD();
-}
-
-static bool fd_is_valid(int fd)
-{
-     return fcntl(fd, F_GETFD) != -1 || errno != EBADF;
-}
-
-static void GC(JSContext *cx)
-{
-    JS_GC(JS_GetRuntime(cx));
-    // Trigger another to wait for background finalization to end.
-    JS_GC(JS_GetRuntime(cx));
-}
-
-END_TEST(testMappedArrayBuffer_bug945152)
-#endif
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -3156,36 +3156,16 @@ JS_AllocateArrayBufferContents(JSContext
 /*
  * Reallocate memory allocated by JS_AllocateArrayBufferContents, growing or
  * shrinking it as appropriate.  The new data pointer will be returned in data.
  * If *contents is nullptr, behaves like JS_AllocateArrayBufferContents.
  */
 extern JS_PUBLIC_API(bool)
 JS_ReallocateArrayBufferContents(JSContext *cx, uint32_t nbytes, void **contents, uint8_t **data);
 
-/*
- * Create memory mapped array buffer contents.
- * For cloning, the fd will not be closed after mapping, and the caller must
- * take care of closing fd after calling this function.
- * A new duplicated fd used by the mapping is returned in new_fd.
- */
-extern JS_PUBLIC_API(bool)
-JS_CreateMappedArrayBufferContents(int fd, int *new_fd, size_t offset,
-                                   size_t length, void **contents);
-
-/*
- * Release the allocated resource of mapped array buffer contents before the
- * object is created.
- * If a new object has been created by JS_NewArrayBufferWithContents() with
- * this content, then JS_NeuterArrayBuffer() should be used instead to release
- * the resource used by the object.
- */
-extern JS_PUBLIC_API(void)
-JS_ReleaseMappedArrayBufferContents(int fd, void *contents, size_t length);
-
 extern JS_PUBLIC_API(JSIdArray *)
 JS_Enumerate(JSContext *cx, JSObject *obj);
 
 /*
  * Create an object to iterate over enumerable properties of obj, in arbitrary
  * property definition order.  NB: This differs from longstanding for..in loop
  * order, which uses order of property definition in obj.
  */
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -159,22 +159,21 @@ JSRuntime::createJitRuntime(JSContext *c
 
 bool
 JSCompartment::ensureJitCompartmentExists(JSContext *cx)
 {
     using namespace js::jit;
     if (jitCompartment_)
         return true;
 
-    JitRuntime *jitRuntime = cx->runtime()->getJitRuntime(cx);
-    if (!jitRuntime)
+    if (!zone()->getJitZone(cx))
         return false;
 
     /* Set the compartment early, so linking works. */
-    jitCompartment_ = cx->new_<JitCompartment>(jitRuntime);
+    jitCompartment_ = cx->new_<JitCompartment>();
 
     if (!jitCompartment_)
         return false;
 
     if (!jitCompartment_->initialize(cx)) {
         js_delete(jitCompartment_);
         jitCompartment_ = nullptr;
         return false;
@@ -911,34 +910,27 @@ void
 JSCompartment::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                       size_t *tiAllocationSiteTables,
                                       size_t *tiArrayTypeTables,
                                       size_t *tiObjectTypeTables,
                                       size_t *compartmentObject,
                                       size_t *shapesCompartmentTables,
                                       size_t *crossCompartmentWrappersArg,
                                       size_t *regexpCompartment,
-                                      size_t *debuggeesSet,
-                                      size_t *baselineStubsOptimized)
+                                      size_t *debuggeesSet)
 {
     *compartmentObject += mallocSizeOf(this);
     types.addSizeOfExcludingThis(mallocSizeOf, tiAllocationSiteTables,
                                  tiArrayTypeTables, tiObjectTypeTables);
     *shapesCompartmentTables += baseShapes.sizeOfExcludingThis(mallocSizeOf)
                               + initialShapes.sizeOfExcludingThis(mallocSizeOf)
                               + newTypeObjects.sizeOfExcludingThis(mallocSizeOf)
                               + lazyTypeObjects.sizeOfExcludingThis(mallocSizeOf);
     *crossCompartmentWrappersArg += crossCompartmentWrappers.sizeOfExcludingThis(mallocSizeOf);
     *regexpCompartment += regExps.sizeOfExcludingThis(mallocSizeOf);
     *debuggeesSet += debuggees.sizeOfExcludingThis(mallocSizeOf);
-#ifdef JS_ION
-    if (jitCompartment()) {
-        *baselineStubsOptimized +=
-            jitCompartment()->optimizedStubSpace()->sizeOfExcludingThis(mallocSizeOf);
-    }
-#endif
 }
 
 void
 JSCompartment::adoptWorkerAllocator(Allocator *workerAllocator)
 {
     zone()->allocator.arenas.adoptArenas(runtimeFromMainThread(), &workerAllocator->arenas);
 }
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -224,18 +224,17 @@ struct JSCompartment
     void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                 size_t *tiAllocationSiteTables,
                                 size_t *tiArrayTypeTables,
                                 size_t *tiObjectTypeTables,
                                 size_t *compartmentObject,
                                 size_t *shapesCompartmentTables,
                                 size_t *crossCompartmentWrappers,
                                 size_t *regexpCompartment,
-                                size_t *debuggeesSet,
-                                size_t *baselineStubsOptimized);
+                                size_t *debuggeesSet);
 
     /*
      * Shared scope property tree, and arena-pool for allocating its nodes.
      */
     js::PropertyTree             propertyTree;
 
     /* Set of all unowned base shapes in the compartment. */
     js::BaseShapeSet             baseShapes;
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -5294,16 +5294,19 @@ js::ReleaseAllJITCode(FreeOp *fop)
     /*
      * Scripts can entrain nursery things, inserting references to the script
      * into the store buffer. Clear the store buffer before discarding scripts.
      */
     MinorGC(fop->runtime(), JS::gcreason::EVICT_NURSERY);
 # endif
 
     for (ZonesIter zone(fop->runtime(), SkipAtoms); !zone.done(); zone.next()) {
+        if (!zone->jitZone())
+            continue;
+
 # ifdef DEBUG
         /* Assert no baseline scripts are marked as active. */
         for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
             JS_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
         }
 # endif
 
@@ -5317,16 +5320,18 @@ js::ReleaseAllJITCode(FreeOp *fop)
             jit::FinishInvalidation(fop, script);
 
             /*
              * Discard baseline script if it's not marked as active. Note that
              * this also resets the active flag.
              */
             jit::FinishDiscardBaselineScript(fop, script);
         }
+
+        zone->jitZone()->optimizedStubSpace()->free();
     }
 #endif
 }
 
 /*
  * There are three possible PCCount profiling states:
  *
  * 1. None: Neither scripts nor the runtime have count information.
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -489,16 +489,32 @@ TemporaryTypeSet *
 TypeSet::clone(LifoAlloc *alloc) const
 {
     TemporaryTypeSet *res = alloc->new_<TemporaryTypeSet>();
     if (!res || !clone(alloc, res))
         return nullptr;
     return res;
 }
 
+TemporaryTypeSet *
+TypeSet::filter(LifoAlloc *alloc, bool filterUndefined, bool filterNull) const
+{
+    TemporaryTypeSet *res = clone(alloc);
+    if (!res)
+        return nullptr;
+
+    if (filterUndefined)
+        res->flags = res->flags & ~TYPE_FLAG_UNDEFINED;
+
+    if (filterNull)
+        res->flags = res->flags & ~TYPE_FLAG_NULL;
+
+    return res;
+}
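
A hedged usage sketch for the new filter() helper: a caller that has just guarded against null/undefined can derive a narrowed temporary set from an observed one. The wrapper name and parameters are illustrative; like clone(), filter() returns nullptr on allocation failure.

    // Illustrative helper, not part of this patch.
    static js::types::TemporaryTypeSet *
    withoutUndefinedOrNull(js::LifoAlloc *alloc, js::types::TypeSet *observed)
    {
        return observed->filter(alloc, /* filterUndefined = */ true,
                                /* filterNull = */ true);
    }
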
+
 /* static */ TemporaryTypeSet *
 TypeSet::unionSets(TypeSet *a, TypeSet *b, LifoAlloc *alloc)
 {
     TemporaryTypeSet *res = alloc->new_<TemporaryTypeSet>(a->baseFlags() | b->baseFlags(),
                                                           static_cast<TypeObjectKey**>(nullptr));
     if (!res)
         return nullptr;
 
@@ -4260,19 +4276,27 @@ TypeScript::Sweep(FreeOp *fop, JSScript 
 
 void
 TypeScript::destroy()
 {
     js_free(this);
 }
 
 void
-Zone::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, size_t *typePool)
+Zone::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+                             size_t *typePool,
+                             size_t *baselineStubsOptimized)
 {
     *typePool += types.typeLifoAlloc.sizeOfExcludingThis(mallocSizeOf);
+#ifdef JS_ION
+    if (jitZone()) {
+        *baselineStubsOptimized +=
+            jitZone()->optimizedStubSpace()->sizeOfExcludingThis(mallocSizeOf);
+    }
+#endif
 }
 
 void
 TypeCompartment::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                         size_t *allocationSiteTables,
                                         size_t *arrayTypeTables,
                                         size_t *objectTypeTables)
 {
--- a/js/src/jsinfer.h
+++ b/js/src/jsinfer.h
@@ -572,16 +572,19 @@ class TypeSet
 
     /* Forward all types in this set to the specified constraint. */
     bool addTypesToConstraint(JSContext *cx, TypeConstraint *constraint);
 
     // Clone a type set into an arbitrary allocator.
     TemporaryTypeSet *clone(LifoAlloc *alloc) const;
     bool clone(LifoAlloc *alloc, TemporaryTypeSet *result) const;
 
+    // Create a new TemporaryTypeSet with undefined and/or null filtered out.
+    TemporaryTypeSet *filter(LifoAlloc *alloc, bool filterUndefined, bool filterNull) const;
+
   protected:
     uint32_t baseObjectCount() const {
         return (flags & TYPE_FLAG_OBJECT_COUNT_MASK) >> TYPE_FLAG_OBJECT_COUNT_SHIFT;
     }
     inline void setBaseObjectCount(uint32_t count);
 
     inline void clearObjects();
 };
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -5903,18 +5903,16 @@ JSObject::addSizeOfExcludingThis(mozilla
         if (MOZ_UNLIKELY(elements->isAsmJSArrayBuffer())) {
 #if defined (JS_CPU_X64)
             // On x64, ArrayBufferObject::prepareForAsmJS switches the
             // ArrayBufferObject to use mmap'd storage.
             sizes->nonHeapElementsAsmJS += as<ArrayBufferObject>().byteLength();
 #else
             sizes->mallocHeapElementsAsmJS += mallocSizeOf(elements);
 #endif
-        } else if (MOZ_UNLIKELY(elements->isMappedArrayBuffer())) {
-            sizes->nonHeapElementsMapped += as<ArrayBufferObject>().byteLength();
         } else {
             sizes->mallocHeapElementsNonAsmJS += mallocSizeOf(elements);
         }
     }
 
     // Other things may be measured in the future if DMD indicates it is worthwhile.
     if (is<JSFunction>() ||
         is<JSObject>() ||
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -247,18 +247,17 @@ class JSObject : public js::ObjectImpl
     /*
      * Make a non-array object with the specified initial state. This method
      * takes ownership of any extantSlots it is passed.
      */
     static inline JSObject *create(js::ExclusiveContext *cx,
                                    js::gc::AllocKind kind,
                                    js::gc::InitialHeap heap,
                                    js::HandleShape shape,
-                                   js::HandleTypeObject type,
-                                   js::HeapSlot *extantSlots = nullptr);
+                                   js::HandleTypeObject type);
 
     /* Make an array object with the specified initial state. */
     static inline js::ArrayObject *createArray(js::ExclusiveContext *cx,
                                                js::gc::AllocKind kind,
                                                js::gc::InitialHeap heap,
                                                js::HandleShape shape,
                                                js::HandleTypeObject type,
                                                uint32_t length);
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -492,51 +492,40 @@ inline bool JSObject::isVarObj()
 {
     if (is<js::DebugScopeObject>())
         return as<js::DebugScopeObject>().scope().isVarObj();
     return lastProperty()->hasObjectFlag(js::BaseShape::VAROBJ);
 }
 
 /* static */ inline JSObject *
 JSObject::create(js::ExclusiveContext *cx, js::gc::AllocKind kind, js::gc::InitialHeap heap,
-                 js::HandleShape shape, js::HandleTypeObject type,
-                 js::HeapSlot *extantSlots /* = nullptr */)
+                 js::HandleShape shape, js::HandleTypeObject type)
 {
     /*
      * Callers must use dynamicSlotsCount to size the initial slot array of the
      * object. We can't check the allocated capacity of the dynamic slots, but
      * make sure their presence is consistent with the shape.
      */
     JS_ASSERT(shape && type);
     JS_ASSERT(type->clasp() == shape->getObjectClass());
     JS_ASSERT(type->clasp() != &js::ArrayObject::class_);
     JS_ASSERT(js::gc::GetGCKindSlots(kind, type->clasp()) == shape->numFixedSlots());
     JS_ASSERT_IF(type->clasp()->flags & JSCLASS_BACKGROUND_FINALIZE, IsBackgroundFinalized(kind));
     JS_ASSERT_IF(type->clasp()->finalize, heap == js::gc::TenuredHeap);
-    JS_ASSERT_IF(extantSlots, dynamicSlotsCount(shape->numFixedSlots(), shape->slotSpan(),
-                                                type->clasp()));
 
     const js::Class *clasp = type->clasp();
-    size_t nDynamicSlots = 0;
-    if (!extantSlots)
-        nDynamicSlots = dynamicSlotsCount(shape->numFixedSlots(), shape->slotSpan(), clasp);
+    size_t nDynamicSlots = dynamicSlotsCount(shape->numFixedSlots(), shape->slotSpan(), clasp);
 
     JSObject *obj = js::NewGCObject<js::CanGC>(cx, kind, nDynamicSlots, heap);
     if (!obj)
         return nullptr;
 
     obj->shape_.init(shape);
     obj->type_.init(type);
-    if (extantSlots) {
-#ifdef JSGC_GENERATIONAL
-        if (cx->isJSContext())
-            cx->asJSContext()->runtime()->gcNursery.notifyInitialSlots(obj, extantSlots);
-#endif
-        obj->slots = extantSlots;
-    }
+    // Note: slots are created and assigned internally by NewGCObject.
     obj->elements = js::emptyObjectElements;
 
     if (clasp->hasPrivate())
         obj->privateRef(shape->numFixedSlots()) = nullptr;
 
     size_t span = shape->slotSpan();
     if (span && clasp != &js::ArrayBufferObject::class_)
         obj->initializeSlotRange(0, span);
@@ -583,18 +572,16 @@ inline void
 JSObject::finish(js::FreeOp *fop)
 {
     if (hasDynamicSlots())
         fop->free_(slots);
     if (hasDynamicElements()) {
         js::ObjectElements *elements = getElementsHeader();
         if (MOZ_UNLIKELY(elements->isAsmJSArrayBuffer()))
             js::ArrayBufferObject::releaseAsmJSArrayBuffer(fop, this);
-        else if (MOZ_UNLIKELY(elements->isMappedArrayBuffer()))
-            js::ArrayBufferObject::releaseMappedArrayBuffer(fop, this);
         else
             fop->free_(elements);
     }
 }
 
 /* static */ inline bool
 JSObject::hasProperty(JSContext *cx, js::HandleObject obj,
                       js::HandleId id, bool *foundp, unsigned flags)
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -25,17 +25,16 @@
 #include "jsutil.h"
 #ifdef XP_WIN
 # include "jswin.h"
 #endif
 #include "jswrapper.h"
 
 #include "gc/Barrier.h"
 #include "gc/Marking.h"
-#include "gc/Memory.h"
 #include "jit/AsmJS.h"
 #include "jit/AsmJSModule.h"
 #include "vm/GlobalObject.h"
 #include "vm/Interpreter.h"
 #include "vm/NumericConversions.h"
 #include "vm/SharedArrayObject.h"
 #include "vm/WrapperObject.h"
 
@@ -469,36 +468,38 @@ ArrayBufferObject::changeContents(JSCont
         rt->gcNursery.notifyNewElements(this, newHeader);
 #endif
 
     initElementsHeader(newHeader, byteLengthCopy);
     InitViewList(this, viewListHead);
 }
 
 void
-ArrayBufferObject::neuter(JSContext *cx)
+ArrayBufferObject::neuter(ObjectElements *newHeader, JSContext *cx)
 {
-    JS_ASSERT(!isSharedArrayBuffer());
+    MOZ_ASSERT(!isSharedArrayBuffer());
 
-    JS_ASSERT(cx);
-    if (isMappedArrayBuffer()) {
-        releaseMappedArrayBuffer(nullptr, this);
-        setFixedElements();
-    } else if (hasDynamicElements() && !isAsmJSArrayBuffer()) {
+    if (hasStealableContents()) {
+        MOZ_ASSERT(newHeader);
+
         ObjectElements *oldHeader = getElementsHeader();
-        changeContents(cx, ObjectElements::fromElements(fixedElements()));
+        MOZ_ASSERT(newHeader != oldHeader);
+
+        changeContents(cx, newHeader);
 
         FreeOp fop(cx->runtime(), false);
         fop.free_(oldHeader);
+    } else {
+        elements = newHeader->elements();
     }
 
     uint32_t byteLen = 0;
-    updateElementsHeader(getElementsHeader(), byteLen);
+    updateElementsHeader(newHeader, byteLen);
 
-    getElementsHeader()->setIsNeuteredBuffer();
+    newHeader->setIsNeuteredBuffer();
 }
 
 /* static */ bool
 ArrayBufferObject::ensureNonInline(JSContext *cx, Handle<ArrayBufferObject*> buffer)
 {
     JS_ASSERT(!buffer->isSharedArrayBuffer());
     if (buffer->hasDynamicElements())
         return true;
@@ -629,43 +630,16 @@ ArrayBufferObject::neuterAsmJSArrayBuffe
 
     js_ReportOverRecursed(cx);
     return false;
 #else
     return true;
 #endif
 }
 
-void *
-ArrayBufferObject::createMappedArrayBuffer(int fd, int *new_fd, size_t offset, size_t length)
-{
-    void *ptr = AllocateMappedObject(fd, new_fd, offset, length, 8,
-                                     sizeof(MappingInfoHeader) + sizeof(ObjectElements));
-    if (!ptr)
-        return nullptr;
-
-    ptr = reinterpret_cast<void *>(uintptr_t(ptr) + sizeof(MappingInfoHeader));
-    ObjectElements *header = reinterpret_cast<ObjectElements *>(ptr);
-    initMappedElementsHeader(header, *new_fd, offset, length);
-
-    return ptr;
-}
-
-void
-ArrayBufferObject::releaseMappedArrayBuffer(FreeOp *fop, JSObject *obj)
-{
-    ArrayBufferObject &buffer = obj->as<ArrayBufferObject>();
-    if(!buffer.isMappedArrayBuffer() || buffer.isNeutered())
-        return;
-
-    ObjectElements *header = buffer.getElementsHeader();
-    if (header)
-        DeallocateMappedObject(buffer.getMappingFD(), header, header->initializedLength);
-}
-
 void
 ArrayBufferObject::addView(ArrayBufferViewObject *view)
 {
     // This view should never have been associated with a buffer before
     JS_ASSERT(view->bufferLink() == UNSET_BUFFER_LINK);
 
     // Note that pre-barriers are not needed here because either the list was
     // previously empty, in which case no pointer is being overwritten, or the
@@ -782,54 +756,58 @@ ArrayBufferObject::createDataViewForThis
     CallArgs args = CallArgsFromVp(argc, vp);
     return CallNonGenericMethod<IsArrayBuffer, createDataViewForThisImpl>(cx, args);
 }
 
 /* static */ bool
 ArrayBufferObject::stealContents(JSContext *cx, Handle<ArrayBufferObject*> buffer, void **contents,
                                  uint8_t **data)
 {
-    // If the ArrayBuffer's elements are dynamically allocated and nothing else
-    // prevents us from stealing them, transfer ownership directly.  Otherwise,
-    // the elements are small and allocated inside the ArrayBuffer object's GC
-    // header so we must make a copy.
+    uint32_t byteLen = buffer->byteLength();
+
+    // If the ArrayBuffer's elements are transferable, transfer ownership
+    // directly.  Otherwise we have to copy the data into new elements.
     ObjectElements *transferableHeader;
-    bool stolen;
-    if (buffer->hasDynamicElements() && !buffer->isAsmJSArrayBuffer()) {
-        stolen = true;
+    ObjectElements *newHeader;
+    bool stolen = buffer->hasStealableContents();
+    if (stolen) {
         transferableHeader = buffer->getElementsHeader();
+
+        newHeader = AllocateArrayBufferContents(cx, byteLen);
+        if (!newHeader)
+            return false;
     } else {
-        stolen = false;
-
-        uint32_t byteLen = buffer->byteLength();
         transferableHeader = AllocateArrayBufferContents(cx, byteLen);
         if (!transferableHeader)
             return false;
 
         initElementsHeader(transferableHeader, byteLen);
         void *headerDataPointer = reinterpret_cast<void*>(transferableHeader->elements());
         memcpy(headerDataPointer, buffer->dataPointer(), byteLen);
+
+        // Keep using the current elements.
+        newHeader = buffer->getElementsHeader();
     }
 
     JS_ASSERT(!IsInsideNursery(cx->runtime(), transferableHeader));
     *contents = transferableHeader;
     *data = reinterpret_cast<uint8_t *>(transferableHeader + 1);
 
     // Neuter the views, which may also mprotect(PROT_NONE) the buffer. So do
     // it after copying out the data.
     if (!ArrayBufferObject::neuterViews(cx, buffer))
         return false;
 
-    // If the elements were taken from the neutered buffer, revert it back to
-    // using inline storage so it doesn't attempt to free the stolen elements
-    // when finalized.
+    // If the elements were transferable, revert the buffer back to using
+    // inline storage so it doesn't attempt to free the stolen elements when
+    // finalized.
     if (stolen)
         buffer->changeContents(cx, ObjectElements::fromElements(buffer->fixedElements()));
 
-    buffer->neuter(cx);
+    buffer->neuter(newHeader, cx);
     return true;
 }
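
The stealable/non-stealable split above hinges on hasStealableContents(). Its definition is not shown in this hunk, but judging from the condition it replaces it presumably amounts to the buffer having dynamic, non-asm.js elements; a sketch of that inferred predicate (the real ArrayBufferObject method is authoritative and may differ in detail):

    // Inferred sketch only, not part of this patch.
    static bool
    hasStealableContentsSketch(js::ArrayBufferObject &buffer)
    {
        return buffer.hasDynamicElements() && !buffer.isAsmJSArrayBuffer();
    }
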
 
 void
 ArrayBufferObject::obj_trace(JSTracer *trc, JSObject *obj)
 {
     /*
      * If this object changes, it will get marked via the private data barrier,
@@ -1296,19 +1274,43 @@ JS_FRIEND_API(bool)
 JS_NeuterArrayBuffer(JSContext *cx, HandleObject obj)
 {
     if (!obj->is<ArrayBufferObject>()) {
         JS_ReportError(cx, "ArrayBuffer object required");
         return false;
     }
 
     Rooted<ArrayBufferObject*> buffer(cx, &obj->as<ArrayBufferObject>());
-    if (!ArrayBufferObject::neuterViews(cx, buffer))
+
+    ObjectElements *newHeader;
+    if (buffer->hasStealableContents()) {
+        // If we're disposing of the buffer contents, allocate zeroed memory
+        // of equal size and swap that in as contents.  This ensures that
+        // stale indexes that assume the original length won't index out of
+        // bounds.  This is a temporary hack: when we're confident we've
+        // eradicated all stale accesses, we'll stop doing this.
+        newHeader = AllocateArrayBufferContents(cx, buffer->byteLength());
+        if (!newHeader)
+            return false;
+    } else {
+        // This case neuters the existing elements in place, so reuse the
+        // old header as the new one.
+        newHeader = buffer->getElementsHeader();
+    }
+
+    // Mark all views of the ArrayBuffer as neutered.
+    if (!ArrayBufferObject::neuterViews(cx, buffer)) {
+        if (buffer->hasStealableContents()) {
+            FreeOp fop(cx->runtime(), false);
+            fop.free_(newHeader);
+        }
         return false;
-    buffer->neuter(cx);
+    }
+
+    buffer->neuter(newHeader, cx);
     return true;
 }
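
A hedged embedder-side sketch of the friend API as updated above; the pre-check simply avoids the error report that JS_NeuterArrayBuffer itself would raise for non-ArrayBuffer objects:

    #include "jsfriendapi.h"

    static bool
    neuterBuffer(JSContext *cx, JS::HandleObject obj)
    {
        if (!JS_IsArrayBufferObject(obj))
            return false;  // JS_NeuterArrayBuffer would report an error here.
        return JS_NeuterArrayBuffer(cx, obj);
    }
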
 
 JS_FRIEND_API(JSObject *)
 JS_NewArrayBuffer(JSContext *cx, uint32_t nbytes)
 {
     JS_ASSERT(nbytes <= INT32_MAX);
     return ArrayBufferObject::create(cx, nbytes);
@@ -1385,31 +1387,16 @@ JS_StealArrayBufferContents(JSContext *c
 
     Rooted<ArrayBufferObject*> buffer(cx, &obj->as<ArrayBufferObject>());
     if (!ArrayBufferObject::stealContents(cx, buffer, contents, data))
         return false;