Merge mozilla-central to autoland. a=merge
author: Cosmin Sabou <csabou@mozilla.com>
Sat, 01 Sep 2018 12:31:39 +0300
changeset 492168 a6c9a7299ec2a7169ec1f9dd5fa699e883515ecc
parent 492152 1082415e378fab5986f9c6b90d638d8a03690574 (current diff)
parent 492167 2667224d61d0242a29bfe2e442387a92cc58bc16 (diff)
child 492169 cd5cb85fa128eefd6bff9e3e40710d27e5b16ed0
push id: 1815
push user: ffxbld-merge
push date: Mon, 15 Oct 2018 10:40:45 +0000
treeherder: mozilla-release@18d4c09e9378 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 63.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge mozilla-central to autoland. a=merge
gfx/doc/AdvancedLayers.md
gfx/doc/AsyncPanZoom-HighLevel.png
gfx/doc/AsyncPanZoom.md
gfx/doc/GraphicsOverview.md
gfx/doc/LayersHistory.md
gfx/doc/MainPage.md
gfx/doc/MozSurface.md
gfx/doc/README.webrender
gfx/doc/SharedMozSurface.md
gfx/doc/Silk.md
gfx/doc/silkArchitecture.png
--- a/accessible/html/HTMLFormControlAccessible.cpp
+++ b/accessible/html/HTMLFormControlAccessible.cpp
@@ -233,21 +233,24 @@ already_AddRefed<nsIPersistentProperties
 HTMLTextFieldAccessible::NativeAttributes()
 {
   nsCOMPtr<nsIPersistentProperties> attributes =
     HyperTextAccessibleWrap::NativeAttributes();
 
   // Expose type for text input elements as it gives some useful context,
   // especially for mobile.
   nsAutoString type;
-  // In the case of input[type=number], mContent is anonymous and is an
-  // input[type=text]. Getting the root not-anonymous content will give
-  // us the right type. In case of other input types, this returns the same node.
-  nsIContent* content = mContent->FindFirstNonChromeOnlyAccessContent();
-  if (content->AsElement()->GetAttr(kNameSpaceID_None, nsGkAtoms::type, type)) {
+  // In the case of this element being part of a binding, the binding's
+  // parent's type should have precedence. For example an input[type=number]
+  // has an embedded anonymous input[type=text] (along with spinner buttons).
+  // In that case, we would want to take the input type from the parent
+  // and not the anonymous content.
+  nsIContent* widgetElm = BindingParent();
+  if ((widgetElm && widgetElm->AsElement()->GetAttr(kNameSpaceID_None, nsGkAtoms::type, type)) ||
+      mContent->AsElement()->GetAttr(kNameSpaceID_None, nsGkAtoms::type, type)) {
     nsAccUtils::SetAccAttr(attributes, nsGkAtoms::textInputType, type);
     if (!ARIARoleMap() && type.EqualsLiteral("search")) {
       nsAccUtils::SetAccAttr(attributes, nsGkAtoms::xmlroles,
                              NS_LITERAL_STRING("searchbox"));
     }
   }
 
   return attributes.forget();
@@ -256,17 +259,17 @@ HTMLTextFieldAccessible::NativeAttribute
 ENameValueFlag
 HTMLTextFieldAccessible::NativeName(nsString& aName) const
 {
   ENameValueFlag nameFlag = Accessible::NativeName(aName);
   if (!aName.IsEmpty())
     return nameFlag;
 
   // If part of compound of XUL widget then grab a name from XUL widget element.
-  nsIContent* widgetElm = XULWidgetElm();
+  nsIContent* widgetElm = BindingParent();
   if (widgetElm)
     XULElmName(mDoc, widgetElm, aName);
 
   if (!aName.IsEmpty())
     return eNameOK;
 
   // text inputs and textareas might have useful placeholder text
   mContent->AsElement()->GetAttr(kNameSpaceID_None, nsGkAtoms::placeholder, aName);
@@ -297,17 +300,17 @@ HTMLTextFieldAccessible::Value(nsString&
 void
 HTMLTextFieldAccessible::ApplyARIAState(uint64_t* aState) const
 {
   HyperTextAccessibleWrap::ApplyARIAState(aState);
   aria::MapToState(aria::eARIAAutoComplete, mContent->AsElement(), aState);
 
   // If part of compound of XUL widget then pick up ARIA stuff from XUL widget
   // element.
-  nsIContent* widgetElm = XULWidgetElm();
+  nsIContent* widgetElm = BindingParent();
   if (widgetElm)
     aria::MapToState(aria::eARIAAutoComplete, widgetElm->AsElement(), aState);
 }
 
 uint64_t
 HTMLTextFieldAccessible::NativeState() const
 {
   uint64_t state = HyperTextAccessibleWrap::NativeState();
@@ -342,17 +345,17 @@ HTMLTextFieldAccessible::NativeState() c
     return state;
   }
 
   // Expose autocomplete state if it has associated autocomplete list.
   if (mContent->AsElement()->HasAttr(kNameSpaceID_None, nsGkAtoms::list_))
     return state | states::SUPPORTS_AUTOCOMPLETION | states::HASPOPUP;
 
   // Ordinary XUL textboxes don't support autocomplete.
-  if (!XULWidgetElm() && Preferences::GetBool("browser.formfill.enable")) {
+  if (!BindingParent() && Preferences::GetBool("browser.formfill.enable")) {
     // Check to see if autocompletion is allowed on this input. We don't expose
     // it for password fields even though the entire password can be remembered
     // for a page if the user asks it to be. However, the kind of autocomplete
     // we're talking here is based on what the user types, where a popup of
     // possible choices comes up.
     nsAutoString autocomplete;
     mContent->AsElement()->GetAttr(kNameSpaceID_None, nsGkAtoms::autocomplete,
                                    autocomplete);
--- a/accessible/html/HTMLFormControlAccessible.h
+++ b/accessible/html/HTMLFormControlAccessible.h
@@ -109,17 +109,17 @@ protected:
   virtual ~HTMLTextFieldAccessible() {}
 
   // Accessible
   virtual ENameValueFlag NativeName(nsString& aName) const override;
 
   /**
    * Return a XUL widget element this input is part of.
    */
-  nsIContent* XULWidgetElm() const { return mContent->GetBindingParent(); }
+  nsIContent* BindingParent() const { return mContent->GetBindingParent(); }
 };
 
 
 /**
  * Accessible for input@type="file" element.
  */
 class HTMLFileInputAccessible : public HyperTextAccessibleWrap
 {
--- a/browser/base/content/browser.xul
+++ b/browser/base/content/browser.xul
@@ -1265,17 +1265,17 @@ xmlns="http://www.w3.org/1999/xhtml"
         <browser id="sidebar" flex="1" autoscroll="false" disablehistory="true" disablefullscreen="true"
                   style="min-width: 14em; width: 18em; max-width: 36em;" tooltip="aHTMLTooltip"/>
       </vbox>
 
       <splitter id="sidebar-splitter" class="chromeclass-extrachrome sidebar-splitter" hidden="true"/>
       <vbox id="appcontent" flex="1">
         <notificationbox id="high-priority-global-notificationbox" notificationside="top"/>
         <tabbox id="tabbrowser-tabbox"
-                flex="1" eventnode="document" tabcontainer="tabbrowser-tabs">
+                flex="1" tabcontainer="tabbrowser-tabs">
           <tabpanels id="tabbrowser-tabpanels"
                      flex="1" class="plain" selectedIndex="0"/>
         </tabbox>
       </vbox>
       <vbox id="browser-border-end" hidden="true" layer="true"/>
     </hbox>
 #include ../../components/customizableui/content/customizeMode.inc.xul
   </deck>
--- a/browser/extensions/formautofill/test/browser/browser_privacyPreferences.js
+++ b/browser/extensions/formautofill/test/browser/browser_privacyPreferences.js
@@ -76,23 +76,29 @@ add_task(async function test_autofillChe
     await finalPrefPaneLoaded;
     await ContentTask.spawn(browser, SELECTORS, (selectors) => {
       is(content.document.querySelector(selectors.group).hidden, false,
         "Form Autofill group should be visible");
       is(content.document.querySelector(selectors.addressAutofillCheckbox).checked, false,
         "Checkbox should be unchecked when Autofill Addresses is disabled");
       is(content.document.querySelector(selectors.creditCardAutofillCheckbox).checked, false,
         "Checkbox should be unchecked when Autofill Credit Cards is disabled");
+      content.document.querySelector(selectors.addressAutofillCheckbox)
+        .scrollIntoView({block: "center", behavior: "instant"});
     });
 
     info("test toggling the checkboxes");
     await BrowserTestUtils.synthesizeMouseAtCenter(SELECTORS.addressAutofillCheckbox, {}, browser);
     is(Services.prefs.getBoolPref(ENABLED_AUTOFILL_ADDRESSES_PREF), true,
        "Check address autofill is now enabled");
 
+    await ContentTask.spawn(browser, SELECTORS, (selectors) => {
+      content.document.querySelector(selectors.creditCardAutofillCheckbox)
+        .scrollIntoView({block: "center", behavior: "instant"});
+    });
     await BrowserTestUtils.synthesizeMouseAtCenter(SELECTORS.creditCardAutofillCheckbox, {}, browser);
     is(Services.prefs.getBoolPref(ENABLED_AUTOFILL_CREDITCARDS_PREF), true,
        "Check credit card autofill is now enabled");
   });
 });
 
 add_task(async function test_creditCardNotAvailable() {
   await SpecialPowers.pushPrefEnv({set: [[AUTOFILL_CREDITCARDS_AVAILABLE_PREF, false]]});
--- a/build/autoconf/expandlibs.m4
+++ b/build/autoconf/expandlibs.m4
@@ -24,43 +24,20 @@ AC_CACHE_CHECK(what kind of list files a
              dnl first because clang understands @file, but may pass an
              dnl oversized argument list to the linker depending on the
              dnl contents of @file.
              if AC_TRY_COMMAND(${CC-cc} -o conftest${ac_exeext} $CFLAGS $CPPFLAGS $LDFLAGS [-Wl,-filelist,conftest.list] $LIBS 1>&5) && test -s conftest${ac_exeext}; then
                  EXPAND_LIBS_LIST_STYLE=filelist
              elif AC_TRY_COMMAND(${CC-cc} -o conftest${ac_exeext} $CFLAGS $CPPFLAGS $LDFLAGS @conftest.list $LIBS 1>&5) && test -s conftest${ac_exeext}; then
                  EXPAND_LIBS_LIST_STYLE=list
              else
-                 EXPAND_LIBS_LIST_STYLE=none
+                 AC_ERROR([Couldn't find one that works])
              fi
          fi
      else
          dnl We really don't expect to get here, but just in case
          AC_ERROR([couldn't compile a simple C file])
      fi
      rm -rf conftest*])
 
-LIBS_DESC_SUFFIX=desc
-AC_SUBST(LIBS_DESC_SUFFIX)
 AC_SUBST(EXPAND_LIBS_LIST_STYLE)
 
-if test "$GCC_USE_GNU_LD"; then
-    AC_CACHE_CHECK(what kind of ordering can be done with the linker,
-        EXPAND_LIBS_ORDER_STYLE,
-        [> conftest.order
-         _SAVE_LDFLAGS="$LDFLAGS"
-         LDFLAGS="${LDFLAGS} -Wl,--section-ordering-file,conftest.order"
-         AC_TRY_LINK([], [],
-             EXPAND_LIBS_ORDER_STYLE=section-ordering-file,
-             EXPAND_LIBS_ORDER_STYLE=)
-         LDFLAGS="$_SAVE_LDFLAGS"
-         if test -z "$EXPAND_LIBS_ORDER_STYLE"; then
-             if AC_TRY_COMMAND(${CC-cc} ${DSO_LDOPTS} ${LDFLAGS} -o conftest -Wl,--verbose 2> /dev/null | sed -n '/^===/,/^===/p' | grep '\.text'); then
-                 EXPAND_LIBS_ORDER_STYLE=linkerscript
-             else
-                 EXPAND_LIBS_ORDER_STYLE=none
-             fi
-             rm -f ${DLL_PREFIX}conftest${DLL_SUFFIX}
-         fi])
-fi
-AC_SUBST(EXPAND_LIBS_ORDER_STYLE)
-
 ])
--- a/devtools/client/accessibility/accessibility-startup.js
+++ b/devtools/client/accessibility/accessibility-startup.js
@@ -1,29 +1,28 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const Services = require("Services");
-const { AccessibilityFront } = require("devtools/shared/fronts/accessibility");
 
 // @remove after release 63 (See Bug 1482461)
 const PROMOTE_COUNT_PREF = "devtools.promote.accessibility";
 
 /**
  * Component responsible for all accessibility panel startup steps before the panel is
  * actually opened.
  */
 class AccessibilityStartup {
   constructor(toolbox) {
     this.toolbox = toolbox;
 
-    this._updateAccessibilityState = this._updateAccessibilityState.bind(this);
+    this._updateToolHighlight = this._updateToolHighlight.bind(this);
 
     // Creates accessibility front.
     this.initAccessibility();
   }
 
   get target() {
     return this.toolbox.target;
   }
@@ -36,41 +35,40 @@ class AccessibilityStartup {
   }
 
   get walker() {
     return this._walker;
   }
 
   /**
    * Fully initialize accessibility front. Also add listeners for accessibility
-   * service lifecycle events that affect picker state and the state of the tool tab
-   * highlight.
+   * service lifecycle events that affect the state of the tool tab highlight.
    * @return {Promise}
    *         A promise for when accessibility front is fully initialized.
    */
   initAccessibility() {
     if (!this._initAccessibility) {
       this._initAccessibility = (async function() {
-        this._accessibility = new AccessibilityFront(this.target.client,
-                                                     this.target.form);
+        this._accessibility = this.target.getFront("accessibility");
         // We must call a method on an accessibility front here (such as getWalker), in
        // order to be able to check actor's backward compatibility via actorHasMethod.
        // See target.js@getActorDescription for more information.
         this._walker = await this._accessibility.getWalker();
         // Only works with FF61+ targets
         this._supportsLatestAccessibility =
           await this.target.actorHasMethod("accessibility", "enable");
 
         if (this._supportsLatestAccessibility) {
           await this._accessibility.bootstrap();
         }
 
-        this._updateAccessibilityState();
-        this._accessibility.on("init", this._updateAccessibilityState);
-        this._accessibility.on("shutdown", this._updateAccessibilityState);
+        this._updateToolHighlight();
+
+        this._accessibility.on("init", this._updateToolHighlight);
+        this._accessibility.on("shutdown", this._updateToolHighlight);
       }.bind(this))();
     }
 
     return this._initAccessibility;
   }
 
   /**
    * Destroy accessibility front. Also remove listeners for accessibility service
@@ -87,53 +85,35 @@ class AccessibilityStartup {
       if (!this._accessibility) {
         return;
       }
 
       // Ensure that the accessibility isn't still being initiated, otherwise race
       // conditions in the initialization process can throw errors.
       await this._initAccessibility;
 
-      this._accessibility.off("init", this._updateAccessibilityState);
-      this._accessibility.off("shutdown", this._updateAccessibilityState);
+      this._accessibility.off("init", this._updateToolHighlight);
+      this._accessibility.off("shutdown", this._updateToolHighlight);
 
       await this._walker.destroy();
-      await this._accessibility.destroy();
       this._accessibility = null;
       this._walker = null;
     }.bind(this))();
     return this._destroyingAccessibility;
   }
 
   /**
-   * Update states of the accessibility picker and accessibility tab highlight.
-   * @return {[type]} [description]
-   */
-  _updateAccessibilityState() {
-    this._updateAccessibilityToolHighlight();
-    this._updatePickerButton();
-  }
-
-  /**
-   * Update picker button state and ensure toolbar is re-rendered correctly.
-   */
-  _updatePickerButton() {
-    this.toolbox.updatePickerButton();
-    // Calling setToolboxButtons to make sure toolbar is re-rendered correctly.
-    this.toolbox.component.setToolboxButtons(this.toolbox.toolbarButtons);
-  }
-
-  /**
    * Set the state of the accessibility tab highlight depending on whether the
    * accessibility service is initialized or shutdown.
    */
-  _updateAccessibilityToolHighlight() {
-    if (this._accessibility.enabled) {
+  async _updateToolHighlight() {
+    const isHighlighted = await this.toolbox.isToolHighlighted("accessibility");
+    if (this._accessibility.enabled && !isHighlighted) {
       this.toolbox.highlightTool("accessibility");
-    } else {
+    } else if (!this._accessibility.enabled && isHighlighted) {
       this.toolbox.unhighlightTool("accessibility");
     }
   }
 
   // @remove after release 63 (See Bug 1482461)
   updatePanelPromoteCount() {
     Services.prefs.setIntPref(PROMOTE_COUNT_PREF, 0);
   }
--- a/devtools/client/accessibility/panel.js
+++ b/devtools/client/accessibility/panel.js
@@ -34,17 +34,17 @@ function AccessibilityPanel(iframeWindow
 
   this.onTabNavigated = this.onTabNavigated.bind(this);
   this.onPanelVisibilityChange = this.onPanelVisibilityChange.bind(this);
   this.onNewAccessibleFrontSelected =
     this.onNewAccessibleFrontSelected.bind(this);
   this.onAccessibilityInspectorUpdated =
     this.onAccessibilityInspectorUpdated.bind(this);
   this.updateA11YServiceDurationTimer = this.updateA11YServiceDurationTimer.bind(this);
-  this.updatePickerButton = this.updatePickerButton.bind(this);
+  this.forceUpdatePickerButton = this.forceUpdatePickerButton.bind(this);
 
   EventEmitter.decorate(this);
 }
 
 AccessibilityPanel.prototype = {
   /**
    * Open is effectively an asynchronous constructor.
    */
@@ -87,16 +87,19 @@ AccessibilityPanel.prototype = {
     }
 
     this.startup.updatePanelPromoteCount();
 
     this.updateA11YServiceDurationTimer();
     this.front.on("init", this.updateA11YServiceDurationTimer);
     this.front.on("shutdown", this.updateA11YServiceDurationTimer);
 
+    this.front.on("init", this.forceUpdatePickerButton);
+    this.front.on("shutdown", this.forceUpdatePickerButton);
+
     this.isReady = true;
     this.emit("ready");
     resolver(this);
     return this._opening;
   },
 
   onNewAccessibleFrontSelected(selected) {
     this.emit("new-accessible-front-selected", selected);
@@ -176,16 +179,27 @@ AccessibilityPanel.prototype = {
 
     this.panelWin.dispatchEvent(event);
   },
 
   updatePickerButton() {
     this.picker && this.picker.updateButton();
   },
 
+  forceUpdatePickerButton() {
+    // Only update picker button when the panel is selected.
+    if (!this.isVisible) {
+      return;
+    }
+
+    this.updatePickerButton();
+    // Calling setToolboxButtons to make sure toolbar is forced to re-render.
+    this._toolbox.component.setToolboxButtons(this._toolbox.toolbarButtons);
+  },
+
   togglePicker(focus) {
     this.picker && this.picker.toggle();
   },
 
   cancelPicker() {
     this.picker && this.picker.cancel();
   },
 
@@ -236,16 +250,19 @@ AccessibilityPanel.prototype = {
       this.onAccessibilityInspectorUpdated);
 
     this.picker.release();
     this.picker = null;
 
     if (this.front) {
       this.front.off("init", this.updateA11YServiceDurationTimer);
       this.front.off("shutdown", this.updateA11YServiceDurationTimer);
+
+      this.front.off("init", this.forceUpdatePickerButton);
+      this.front.off("shutdown", this.forceUpdatePickerButton);
     }
 
     this._telemetry = null;
     this.panelWin.gToolbox = null;
     this.panelWin.gTelemetry = null;
 
     this.emit("destroyed");
 
--- a/devtools/client/framework/test/browser_toolbox_toolbar_reorder_by_dnd.js
+++ b/devtools/client/framework/test/browser_toolbox_toolbar_reorder_by_dnd.js
@@ -21,57 +21,58 @@
 //   * performance
 //   * memory
 //   * netmonitor
 //   * storage
 
 const { Toolbox } = require("devtools/client/framework/toolbox");
 
 const TEST_STARTING_ORDER = ["inspector", "webconsole", "jsdebugger", "styleeditor",
-                             "performance", "memory", "netmonitor", "storage"];
+                             "performance", "memory", "netmonitor", "storage",
+                             "accessibility"];
 const TEST_DATA = [
   {
     description: "DragAndDrop the target component to back",
     dragTarget: "webconsole",
     dropTarget: "jsdebugger",
     expectedOrder: ["inspector", "jsdebugger", "webconsole", "styleeditor",
-                    "performance", "memory", "netmonitor", "storage"],
+                    "performance", "memory", "netmonitor", "storage", "accessibility"],
   },
   {
     description: "DragAndDrop the target component to front",
     dragTarget: "webconsole",
     dropTarget: "inspector",
     expectedOrder: ["webconsole", "inspector", "jsdebugger", "styleeditor",
-                    "performance", "memory", "netmonitor", "storage"],
+                    "performance", "memory", "netmonitor", "storage", "accessibility"],
   },
   {
     description: "DragAndDrop the target component over the starting of the tab",
     dragTarget: "netmonitor",
     passedTargets: ["memory", "performance", "styleeditor",
                     "jsdebugger", "webconsole", "inspector"],
     dropTarget: "#toolbox-buttons-start",
     expectedOrder: ["netmonitor", "inspector", "webconsole", "jsdebugger",
-                    "styleeditor", "performance", "memory", "storage"],
+                    "styleeditor", "performance", "memory", "storage", "accessibility"],
   },
   {
     description: "DragAndDrop the target component over the ending of the tab",
     dragTarget: "webconsole",
     passedTargets: ["jsdebugger", "styleeditor", "performance",
                     "memory", "netmonitor", "storage"],
     dropTarget: "#toolbox-buttons-end",
     expectedOrder: ["inspector", "jsdebugger", "styleeditor", "performance",
-                    "memory", "netmonitor", "storage", "webconsole", ],
+                    "memory", "netmonitor", "storage", "webconsole", "accessibility"],
   },
   {
     description: "Mouse was out from the document while dragging",
     dragTarget: "webconsole",
     passedTargets: ["inspector"],
     dropTarget: null,
     expectedOrder: ["webconsole", "inspector", "jsdebugger", "styleeditor",
-                    "performance", "memory", "netmonitor", "storage"],
+                    "performance", "memory", "netmonitor", "storage", "accessibility"],
   },
 ];
 
 add_task(async function() {
   const tab = await addTab("about:blank");
   const toolbox = await openToolboxForTab(tab, "inspector", Toolbox.HostType.BOTTOM);
 
   const originalPreference = Services.prefs.getCharPref("devtools.toolbox.tabsOrder");
@@ -94,13 +95,14 @@ add_task(async function() {
 
   info("Test with overflowing tabs");
   prepareToolTabReorderTest(toolbox, TEST_STARTING_ORDER);
   await resizeWindow(toolbox, 800);
   await toolbox.selectTool("storage");
   const dragTarget = "storage";
   const dropTarget = "inspector";
   const expectedOrder = ["storage", "inspector", "webconsole", "jsdebugger",
-                         "styleeditor", "performance", "memory", "netmonitor"];
+                         "styleeditor", "performance", "memory", "netmonitor",
+                         "accessibility"];
   await dndToolTab(toolbox, dragTarget, dropTarget);
   assertToolTabSelected(toolbox, dragTarget);
   assertToolTabPreferenceOrder(expectedOrder);
 });
--- a/devtools/client/framework/test/browser_toolbox_toolbar_reorder_with_extension.js
+++ b/devtools/client/framework/test/browser_toolbox_toolbar_reorder_with_extension.js
@@ -7,17 +7,18 @@
 
 // Test for reordering with an extension installed.
 
 const { Toolbox } = require("devtools/client/framework/toolbox");
 
 const EXTENSION = "@reorder.test";
 
 const TEST_STARTING_ORDER = ["inspector", "webconsole", "jsdebugger", "styleeditor",
-                             "performance", "memory", "netmonitor", "storage", EXTENSION];
+                             "performance", "memory", "netmonitor", "storage",
+                             "accessibility", EXTENSION];
 
 add_task(async function() {
   const extension = ExtensionTestUtils.loadExtension({
     manifest: {
       devtools_page: "extension.html",
       applications: {
         gecko: { id: EXTENSION },
       },
@@ -55,34 +56,35 @@ add_task(async function() {
     Services.prefs.setCharPref("devtools.toolbox.tabsOrder", originalPreference);
     win.resizeTo(originalWindowWidth, originalWindowHeight);
   });
 
   info("Test for DragAndDrop the extension tab");
   let dragTarget = EXTENSION;
   let dropTarget = "webconsole";
   let expectedOrder = ["inspector", EXTENSION, "webconsole", "jsdebugger", "styleeditor",
-                       "performance", "memory", "netmonitor", "storage"];
+                       "performance", "memory", "netmonitor", "storage", "accessibility"];
   prepareToolTabReorderTest(toolbox, TEST_STARTING_ORDER);
   await dndToolTab(toolbox, dragTarget, dropTarget);
   assertToolTabOrder(toolbox, expectedOrder);
   assertToolTabSelected(toolbox, dragTarget);
   assertToolTabPreferenceOrder(expectedOrder);
 
   info("Test the case of that the extension tab is overflowed");
   prepareToolTabReorderTest(toolbox, TEST_STARTING_ORDER);
   await resizeWindow(toolbox, 800);
   await toolbox.selectTool("storage");
   dragTarget = "storage";
   dropTarget = "inspector";
-  expectedOrder = ["storage", "inspector", "webconsole", "jsdebugger",
-                   "styleeditor", "performance", "memory", "netmonitor", EXTENSION];
+  expectedOrder = ["storage", "inspector", "webconsole", "jsdebugger", "styleeditor",
+                   "performance", "memory", "netmonitor", "accessibility", EXTENSION];
   await dndToolTab(toolbox, dragTarget, dropTarget);
   assertToolTabPreferenceOrder(expectedOrder);
 
   info("Test for saving the preference updated after destroying");
   await extension.unload();
   const target = gDevTools.getTargetForTab(tab);
   await gDevTools.closeToolbox(target);
   await target.destroy();
   assertToolTabPreferenceOrder(["storage", "inspector", "webconsole", "jsdebugger",
-                                "styleeditor", "performance", "memory", "netmonitor"]);
+                                "styleeditor", "performance", "memory", "netmonitor",
+                                "accessibility"]);
 });
--- a/devtools/client/framework/toolbox-process-window.js
+++ b/devtools/client/framework/toolbox-process-window.js
@@ -99,17 +99,16 @@ function setPrefDefaults() {
   Services.prefs.setBoolPref("browser.dom.window.dump.enabled", true);
   Services.prefs.setBoolPref("devtools.command-button-noautohide.enabled", true);
   // Bug 1225160 - Using source maps with browser debugging can lead to a crash
   Services.prefs.setBoolPref("devtools.debugger.source-maps-enabled", false);
   Services.prefs.setBoolPref("devtools.debugger.new-debugger-frontend", true);
   Services.prefs.setBoolPref("devtools.preference.new-panel-enabled", false);
   Services.prefs.setBoolPref("layout.css.emulate-moz-box-with-flex", false);
 
-  Services.prefs.setBoolPref("devtools.accessibility.enabled", true);
   Services.prefs.setBoolPref("devtools.performance.enabled", false);
 }
 
 window.addEventListener("load", async function() {
   const cmdClose = document.getElementById("toolbox-cmd-close");
   cmdClose.addEventListener("command", onCloseCommand);
   setPrefDefaults();
   // Reveal status message if connecting is slow or if an error occurs.
--- a/devtools/client/inspector/inspector.js
+++ b/devtools/client/inspector/inspector.js
@@ -1700,17 +1700,23 @@ Inspector.prototype = {
     }
 
     const showA11YPropsItem = new MenuItem({
       id: "node-menu-showaccessibilityproperties",
       label: INSPECTOR_L10N.getStr("inspectorShowAccessibilityProperties.label"),
       click: () => this.showAccessibilityProperties(),
       disabled: true
     });
-    this._updateA11YMenuItem(showA11YPropsItem);
+    // Only attempt to determine if the a11y props menu item needs to be enabled
+    // if AccessibilityFront is enabled.
+    const accessibilityFront = this.target.getFront("accessibility");
+    if (accessibilityFront.enabled) {
+      this._updateA11YMenuItem(showA11YPropsItem);
+    }
+
     menu.append(showA11YPropsItem);
   },
 
   _updateA11YMenuItem: async function(menuItem) {
     const hasMethod = await this.target.actorHasMethod("domwalker",
                                                        "hasAccessibilityProperties");
     if (!hasMethod) {
       return;
--- a/devtools/client/netmonitor/test/head.js
+++ b/devtools/client/netmonitor/test/head.js
@@ -349,19 +349,18 @@ function teardown(monitor) {
   info("Destroying the specified network monitor.");
 
   return (async function() {
     const tab = monitor.toolbox.target.tab;
 
     await waitForAllNetworkUpdateEvents();
     info("All pending requests finished.");
 
-    const onDestroyed = monitor.once("destroyed");
+    await monitor.toolbox.destroy();
     await removeTab(tab);
-    await onDestroyed;
   })();
 }
 
 function waitForNetworkEvents(monitor, getRequests) {
   return new Promise((resolve) => {
     const panel = monitor.panelWin;
     const { getNetworkRequest } = panel.connector;
     let networkEvent = 0;
--- a/devtools/client/preferences/devtools-client.js
+++ b/devtools/client/preferences/devtools-client.js
@@ -223,18 +223,18 @@ pref("devtools.canvasdebugger.enabled", 
 pref("devtools.webaudioeditor.enabled", false);
 
 // Enable Scratchpad
 pref("devtools.scratchpad.enabled", false);
 
 // Make sure the DOM panel is hidden by default
 pref("devtools.dom.enabled", false);
 
-// Make sure the Accessibility panel is hidden by default
-pref("devtools.accessibility.enabled", false);
+// Enable the Accessibility panel.
+pref("devtools.accessibility.enabled", true);
 // Counter to promote the Accessibility panel.
 // @remove after release 63 (See Bug 1482461)
 pref("devtools.promote.accessibility", 1);
 
 // Web Audio Editor Inspector Width should be a preference
 pref("devtools.webaudioeditor.inspectorWidth", 300);
 
 // Web console filters
--- a/dom/base/nsJSEnvironment.cpp
+++ b/dom/base/nsJSEnvironment.cpp
@@ -1263,16 +1263,44 @@ FinishAnyIncrementalGC()
 static void
 FireForgetSkippable(uint32_t aSuspected, bool aRemoveChildless,
                     TimeStamp aDeadline)
 {
   AUTO_PROFILER_TRACING("CC", aDeadline.IsNull() ? "ForgetSkippable"
                                                  : "IdleForgetSkippable");
   PRTime startTime = PR_Now();
   TimeStamp startTimeStamp = TimeStamp::Now();
+
+  static uint32_t sForgetSkippableCounter = 0;
+  static TimeStamp sForgetSkippableFrequencyStartTime;
+  static TimeStamp sLastForgetSkippableEndTime;
+  static const TimeDuration minute = TimeDuration::FromSeconds(60.0f);
+
+  if (sForgetSkippableFrequencyStartTime.IsNull()) {
+    sForgetSkippableFrequencyStartTime = startTimeStamp;
+  } else if (startTimeStamp - sForgetSkippableFrequencyStartTime > minute) {
+    TimeStamp startPlusMinute = sForgetSkippableFrequencyStartTime + minute;
+
+    // If we had forget skippables only at the beginning of the interval, we
+    // still want to use the whole time, minute or more, for frequency
+    // calculation. sLastForgetSkippableEndTime is needed if forget skippable
+    // takes enough time to push the interval to be over a minute.
+    TimeStamp endPoint = startPlusMinute > sLastForgetSkippableEndTime ?
+      startPlusMinute : sLastForgetSkippableEndTime;
+
+    // Duration in minutes.
+    double duration =
+      (endPoint - sForgetSkippableFrequencyStartTime).ToSeconds() / 60;
+    uint32_t frequencyPerMinute = uint32_t(sForgetSkippableCounter / duration);
+    Telemetry::Accumulate(Telemetry::FORGET_SKIPPABLE_FREQUENCY, frequencyPerMinute);
+    sForgetSkippableCounter = 0;
+    sForgetSkippableFrequencyStartTime = startTimeStamp;
+  }
+  ++sForgetSkippableCounter;
+
   FinishAnyIncrementalGC();
   bool earlyForgetSkippable =
     sCleanupsSinceLastGC < NS_MAJOR_FORGET_SKIPPABLE_CALLS;
 
   int64_t budgetMs = aDeadline.IsNull() ?
     kForgetSkippableSliceDuration :
     int64_t((aDeadline - TimeStamp::Now()).ToMilliseconds());
   js::SliceBudget budget = js::SliceBudget(js::TimeBudget(budgetMs));
@@ -1287,16 +1315,18 @@ FireForgetSkippable(uint32_t aSuspected,
   if (sMaxForgetSkippableTime < delta) {
     sMaxForgetSkippableTime = delta;
   }
   sTotalForgetSkippableTime += delta;
   sRemovedPurples += (aSuspected - sPreviousSuspectedCount);
   ++sForgetSkippableBeforeCC;
 
   TimeStamp now = TimeStamp::Now();
+  sLastForgetSkippableEndTime = now;
+
   TimeDuration duration = now - startTimeStamp;
   if (duration.ToSeconds()) {
     TimeDuration idleDuration;
     if (!aDeadline.IsNull()) {
       if (aDeadline < now) {
         // This slice overflowed the idle period.
         if (aDeadline > startTimeStamp) {
           idleDuration = aDeadline - startTimeStamp;
deleted file mode 100644
--- a/gfx/doc/AdvancedLayers.md
+++ /dev/null
@@ -1,308 +0,0 @@
-Advanced Layers
-==============
-
-Advanced Layers is a new method of compositing layers in Gecko. This document serves as a technical
-overview and provides a short walk-through of its source code.
-
-Overview
--------------
-
-Advanced Layers attempts to group as many GPU operations as it can into a single draw call. This is
-a common technique in GPU-based rendering called "batching". It is not always trivial, as a
-batching algorithm can easily waste precious CPU resources trying to build optimal draw calls.
-
-Advanced Layers reuses the existing Gecko layers system as much as possible. Huge layer trees do
-not currently scale well (see the future work section), so opportunities for batching are currently
-limited without expending unnecessary resources elsewhere. However, Advanced Layers has a few
-benefits:
-
- * It submits smaller GPU workloads and buffer uploads than the existing compositor.
- * It needs only a single pass over the layer tree.
- * It uses occlusion information more intelligently.
- * It is easier to add new specialized rendering paths and new layer types.
- * It separates compositing logic from device logic, unlike the existing compositor.
- * It is much faster at rendering 3d scenes or complex layer trees.
- * It has experimental code to use the z-buffer for occlusion culling.
-
-Because of these benefits we hope that it provides a significant improvement over the existing
-compositor.
-
-Advanced Layers uses the acronym "MLG" and "MLGPU" in many places. This stands for "Mid-Level
-Graphics", the idea being that it is optimized for Direct3D 11-style rendering systems as opposed
-to Direct3D 12 or Vulkan.
-
-LayerManagerMLGPU
-------------------------------
-
-Advanced layers does not change client-side rendering at all. Content still uses Direct2D (when
-possible), and creates identical layer trees as it would with a normal Direct3D 11 compositor. In
-fact, Advanced Layers re-uses all of the existing texture handling and video infrastructure as
-well, replacing only the composite-side layer types.
-
-Advanced Layers does not create a `LayerManagerComposite` - instead, it creates a
-`LayerManagerMLGPU`. This layer manager does not have a `Compositor` - instead, it has an
-`MLGDevice`, which roughly abstracts the Direct3D 11 API. (The hope is that this API is easily
-interchangeable for something else when cross-platform or software support is needed.)
-
-`LayerManagerMLGPU` also dispenses with the old "composite" layers for new layer types. For
-example, `ColorLayerComposite` becomes `ColorLayerMLGPU`. Since these layer types implement
-`HostLayer`, they integrate with `LayerTransactionParent` as normal composite layers would.
-
-Rendering Overview
-----------------------------
-
-The steps for rendering are described in more detail below, but roughly the process is:
-
-1. Sort layers front-to-back.
-2. Create a dependency tree of render targets (called "views").
-3. Accumulate draw calls for all layers in each view.
-4. Upload draw call buffers to the GPU.
-5. Execute draw commands for each view.
-
-Advanced Layers divides the layer tree into "views" (`RenderViewMLGPU`), which correspond to a
-render target. The root layer is represented by a view corresponding to the screen. Layers that
-require intermediate surfaces have temporary views. Layers are analyzed front-to-back, and rendered
-back-to-front within a view. Views themselves are rendered front-to-back, to minimize render target
-switching.
-
-Each view contains one or more rendering passes (`RenderPassMLGPU`). A pass represents a single
-draw command with one or more rendering items attached to it. For example, a `SolidColorPass` item
-contains a rectangle and an RGBA value, and many of these can be drawn with a single GPU call.
-
-When considering a layer, views will first try to find an existing rendering batch that can support
-it. If so, that pass will accumulate another draw item for the layer. Otherwise, a new pass will be
-added.
-
-When trying to find a matching pass for a layer, there is a tradeoff in CPU time versus the GPU
-time saved by not issuing another draw commands. We generally care more about CPU time, so we do
-not try too hard in matching items to an existing batch.
-
-After all layers have been processed, there is a "prepare" step. This copies all accumulated draw
-data and uploads it into vertex and constant buffers in the GPU.
-
-Finally, we execute rendering commands. At the end of the frame, all batches and (most) constant
-buffers are thrown away.
-
-Shaders Overview
--------------------------------------
-
-Advanced Layers currently has five layer-related shader pipelines:
-
- - Textured (PaintedLayer, ImageLayer, CanvasLayer)
- - ComponentAlpha (PaintedLayer with component-alpha)
- - YCbCr (ImageLayer with YCbCr video)
- - Color (ColorLayers)
- - Blend (ContainerLayers with mix-blend modes)
-
-There are also three special shader pipelines:
-
- - MaskCombiner, which is used to combine mask layers into a single texture.
- - Clear, which is used for fast region-based clears when not directly supported by the GPU.
- - Diagnostic, which is used to display the diagnostic overlay texture.
-
-The layer shaders follow a unified structure. Each pipeline has a vertex and pixel shader.
-The vertex shader takes a layers ID, a z-buffer depth, a unit position in either a unit
-square or unit triangle, and either rectangular or triangular geometry. Shaders can also
-have ancillary data needed like texture coordinates or colors.
-
-Most of the time, layers have simple rectangular clips with simple rectilinear transforms, and
-pixel shaders do not need to perform masking or clipping. For these layers we use a fast-path
-pipeline, using unit-quad shaders that are able to clip geometry so the pixel shader does not
-have to. This type of pipeline does not support complex masks.
-
-If a layer has a complex mask, a rotation or 3d transform, or a complex operation like blending,
-then we use shaders capable of handling arbitrary geometry. Their input is a unit triangle,
-and these shaders are generally more expensive.
-
-All of the shader-specific data is modelled in ShaderDefinitionsMLGPU.h.
-
-CPU Occlusion Culling
--------------------------------------
-
-By default, Advanced Layers performs occlusion culling on the CPU. Since layers are visited
-front-to-back, this is simply a matter of accumulating the visible region of opaque layers, and
-subtracting it from the visible region of subsequent layers. There is a major difference
-between this occlusion culling and PostProcessLayers of the old compositor: AL performs culling
-after invalidation, not before. Completely valid layers will have an empty visible region.
-
-Most layer types (with the exception of images) will intelligently split their draw calls into a
-batch of individual rectangles, based on their visible region.
-
-Z-Buffering and Occlusion
--------------------------------------
-
-Advanced Layers also supports occlusion culling on the GPU, using a z-buffer. This is disabled by
-default currently since it is significantly costly on integrated GPUs. When using the z-buffer, we
-separate opaque layers into a separate list of passes. The render process then uses the following
-steps:
-
- 1. The depth buffer is set to read-write.
- 2. Opaque batches are executed.,
- 3. The depth buffer is set to read-only.
- 4. Transparent batches are executed.
-
-The problem we have observed is that the depth buffer increases writes to the GPU, and on
-integrated GPUs this is expensive - we have seen draw call times increase by 20-30%, which is the
-wrong direction we want to take on battery life. In particular on a full screen video, the call to
-ClearDepthStencilView plus the actual depth buffer write of the video can double GPU time.
-
-For now the depth-buffer is disabled until we can find a compelling case for it on non-integrated
-hardware.
-
-Clipping
--------------------------------------
-
-Clipping is a bit tricky in Advanced Layers. We cannot use the hardware "scissor" feature, since the
-clip can change from instance to instance within a batch. And if using the depth buffer, we
-cannot write transparent pixels for the clipped area. As a result we always clip opaque draw rects
-in the vertex shader (and sometimes even on the CPU, as is needed for sane texture coordiantes).
-Only transparent items are clipped in the pixel shader. As a result, masked layers and layers with
-non-rectangular transforms are always considered transparent, and use a more flexible clipping
-pipeline.
-
-Plane Splitting
----------------------
-
-Plane splitting is when a 3D transform causes a layer to be split - for example, one transparent
-layer may intersect another on a separate plane. When this happens, Gecko sorts layers using a BSP
-tree and produces a list of triangles instead of draw rects.
-
-These layers cannot use the "unit quad" shaders that support the fast clipping pipeline. Instead
-they always use the full triangle-list shaders that support extended vertices and clipping.
-
-This is the slowest path we can take when building a draw call, since we must interact with the
-polygon clipping and texturing code.
-
-Masks
----------
-
-For each layer with a mask attached, Advanced Layers builds a `MaskOperation`. These operations
-must resolve to a single mask texture, as well as a rectangular area to which the mask applies. All
-batched pixel shaders will automatically clip pixels to the mask if a mask texture is bound. (Note
-that we must use separate batches if the mask texture changes.)
-
-Some layers have multiple mask textures. In this case, the MaskOperation will store the list of
-masks, and right before rendering, it will invoke a shader to combine these masks into a single texture.
-
-MaskOperations are shared across layers when possible, but are not cached across frames.
-
-BigImage Support
---------------------------
-
-ImageLayers and CanvasLayers can be tiled with many individual textures. This happens in rare cases
-where the underlying buffer is too big for the GPU. Early on this caused problems for Advanced
-Layers, since AL required one texture per layer. We implemented BigImage support by creating
-temporary ImageLayers for each visible tile, and throwing those layers away at the end of the
-frame.
-
-Advanced Layers no longer has a 1:1 layer:texture restriction, but we retain the temporary layer
-solution anyway. It is not much code and it means we do not have to split `TexturedLayerMLGPU`
-methods into iterated and non-iterated versions.
-
-Texture Locking
-----------------------
-
-Advanced Layers has a different texture locking scheme than the existing compositor. If a texture
-needs to be locked, then it is locked by the MLGDevice automatically when bound to the current
-pipeline. The MLGDevice keeps a set of the locked textures to avoid double-locking. At the end of
-the frame, any textures in the locked set are unlocked.
-
-We cannot easily replicate the locking scheme in the old compositor, since the duration of using
-the texture is not scoped to when we visit the layer.
-
-Buffer Measurements
--------------------------------
-
-Advanced Layers uses constant buffers to send layer information and extended instance data to the
-GPU. We do this by pre-allocating large constant buffers and mapping them with `MAP_DISCARD` at the
-beginning of the frame. Batches may allocate into this up to the maximum bindable constant buffer
-size of the device (currently, 64KB).
-
-There are some downsides to this approach. Constant buffers are difficult to work with - they have
-specific alignment requirements, and care must be taken not too run over the maximum number of
-constants in a buffer. Another approach would be to store constants in a 2D texture and use vertex
-shader texture fetches. Advanced Layers implemented this and benchmarked it to decide which
-approach to use. Textures seemed to skew better on GPU performance, but worse on CPU, but this
-varied depending on the GPU. Overall constant buffers performed best and most consistently, so we
-have kept them.
-
-Additionally, we tested different ways of performing buffer uploads. Buffer creation itself is
-costly, especially on integrated GPUs, and especially so for immutable, immediate-upload buffers.
-As a result we aggressively cache buffer objects and always allocate them as MAP_DISCARD unless
-they are write-once and long-lived.
-
-Buffer Types
-------------
-
-Advanced Layers has a few different classes to help build and upload buffers to the GPU. They are:
-
- - `MLGBuffer`. This is the low-level shader resource that `MLGDevice` exposes. It is the building
-   block for buffer helper classes, but it can also be used to make one-off, immutable,
-   immediate-upload buffers. MLGBuffers, being a GPU resource, are reference counted.
- - `SharedBufferMLGPU`. These are large, pre-allocated buffers that are read-only on the GPU and
-   write-only on the CPU. They usually exceed the maximum bindable buffer size. There are three
-   shared buffers created by default and they are automatically unmapped as needed: one for vertices,
-   one for vertex shader constants, and one for pixel shader constants. When callers allocate into a
-   shared buffer they get back a mapped pointer, a GPU resource, and an offset. When the underlying
-   device supports offsetable buffers (like `ID3D11DeviceContext1` does), this results in better GPU
-   utilization, as there are less resources and fewer upload commands.
- - `ConstantBufferSection` and `VertexBufferSection`. These are "views" into a `SharedBufferMLGPU`.
-   They contain the underlying `MLGBuffer`, and when offsetting is supported, the offset
-   information necessary for resource binding. Sections are not reference counted.
- - `StagingBuffer`. A dynamically sized CPU buffer where items can be appended in a free-form
-   manner. The stride of a single "item" is computed by the first item written, and successive
-   items must have the same stride. The buffer must be uploaded to the GPU manually. Staging buffers
-   are appropriate for creating general constant or vertex buffer data. They can also write items in
-   reverse, which is how we render back-to-front when layers are visited front-to-back. They can be
-   uploaded to a `SharedBufferMLGPU` or an immutabler `MLGBuffer` very easily. Staging buffers are not
-   reference counted.
-
-Unsupported Features
---------------------------------
-
-Currently, these features of the old compositor are not yet implemented.
-
- - OpenGL and software support (currently AL only works on D3D11).
- - APZ displayport overlay.
- - Diagnostic/developer overlays other than the FPS/timing overlay.
- - DEAA. It was never ported to the D3D11 compositor, but we would like it.
- - Component alpha when used inside an opaque intermediate surface.
- - Effects prefs. Possibly not needed post-B2G removal.
- - Widget overlays and underlays used by macOS and Android.
- - DefaultClearColor. This is Android specific, but is easy to added when needed.
- - Frame uniformity info in the profiler. Possibly not needed post-B2G removal.
- - LayerScope. There are no plans to make this work.
-
-Future Work
---------------------------------
-
- - Refactor for D3D12/Vulkan support (namely, split MLGDevice into something less stateful and something else more low-level).
- - Remove "MLG" moniker and namespace everything.
- - Other backends (D3D12/Vulkan, OpenGL, Software)
- - Delete CompositorD3D11
- - Add DEAA support
- - Re-enable the depth buffer by default for fast GPUs
- - Re-enable right-sizing of inaccurately sized containers
- - Drop constant buffers for ancillary vertex data
- - Fast shader paths for simple video/painted layer cases
-
-History
-----------
-
-Advanced Layers has gone through four major design iterations. The initial version used tiling -
-each render view divided the screen into 128x128 tiles, and layers were assigned to tiles based on
-their screen-space draw area. This approach proved not to scale well to 3d transforms, and so
-tiling was eliminated.
-
-We replaced it with a simple system of accumulating draw regions to each batch, thus ensuring that
-items could be assigned to batches while maintaining correct z-ordering. This second iteration also
-coincided with plane-splitting support.
-
-On large layer trees, accumulating the affected regions of batches proved to be quite expensive.
-This led to a third iteration, using depth buffers and separate opaque and transparent batch lists
-to achieve z-ordering and occlusion culling.
-
-Finally, depth buffers proved to be too expensive, and we introduced a simple CPU-based occlusion
-culling pass. This iteration coincided with using more precise draw rects and splitting pipelines
-into unit-quad, cpu-clipped and triangle-list, gpu-clipped variants.
-
deleted file mode 100644
--- a/gfx/doc/AsyncPanZoom.md
+++ /dev/null
@@ -1,299 +0,0 @@
-Asynchronous Panning and Zooming {#apz}
-================================
-
-**This document is a work in progress.  Some information may be missing or incomplete.**
-
-## Goals
-
-We need to be able to provide a visual response to user input with minimal latency.
-In particular, on devices with touch input, content must track the finger exactly while panning, or the user experience is very poor.
-According to the UX team, 120ms is an acceptable latency between user input and response.
-
-## Context and surrounding architecture
-
-The fundamental problem we are trying to solve with the Asynchronous Panning and Zooming (APZ) code is that of responsiveness.
-By default, web browsers operate in a "game loop" that looks like this:
-
-    while true:
-        process input
-        do computations
-        repaint content
-        display repainted content
-
-In browsers the "do computation" step can be arbitrarily expensive because it can involve running event handlers in web content.
-Therefore, there can be an arbitrary delay between the input being received and the on-screen display getting updated.
-
-Responsiveness is always good, and with touch-based interaction it is even more important than with mouse or keyboard input.
-In order to ensure responsiveness, we split the "game loop" model of the browser into a multithreaded variant which looks something like this:
-
-    Thread 1 (compositor thread)
-    while true:
-        receive input
-        send a copy of input to thread 2
-        adjust painted content based on input
-        display adjusted painted content
-    
-    Thread 2 (main thread)
-    while true:
-        receive input from thread 1
-        do computations
-        repaint content
-        update the copy of painted content in thread 1
-
-This multithreaded model is called off-main-thread compositing (OMTC), because the compositing (where the content is displayed on-screen) happens on a separate thread from the main thread.
-Note that this is a very very simplified model, but in this model the "adjust painted content based on input" is the primary function of the APZ code.
-
-The "painted content" is stored on a set of "layers", that are conceptually double-buffered.
-That is, when the main thread does its repaint, it paints into one set of layers (the "client" layers).
-The update that is sent to the compositor thread copies all the changes from the client layers into another set of layers that the compositor holds.
-These layers are called the "shadow" layers or the "compositor" layers.
-The compositor in theory can continuously composite these shadow layers to the screen while the main thread is busy doing other things and painting a new set of client layers.
-
-The APZ code takes the input events that are coming in from the hardware and uses them to figure out what the user is trying to do (e.g. pan the page, zoom in).
-It then expresses this user intention in the form of translation and/or scale transformation matrices.
-These transformation matrices are applied to the shadow layers at composite time, so that what the user sees on-screen reflects what they are trying to do as closely as possible.
-
-## Technical overview
-
-As per the heavily simplified model described above, the fundamental purpose of the APZ code is to take input events and produce transformation matrices.
-This section attempts to break that down and identify the different problems that make this task non-trivial.
-
-### Checkerboarding
-
-The content area that is painted and stored in a shadow layer is called the "displayport".
-The APZ code is responsible for determining how large the displayport should be.
-On the one hand, we want the displayport to be as large as possible.
-At the very least it needs to be larger than what is visible on-screen, because otherwise, as soon as the user pans, there will be some unpainted area of the page exposed.
-However, we cannot always set the displayport to be the entire page, because the page can be arbitrarily long and this would require an unbounded amount of memory to store.
-Therefore, a good displayport size is one that is larger than the visible area but not so large that it is a huge drain on memory.
-Because the displayport is usually smaller than the whole page, it is always possible for the user to scroll so fast that they end up in an area of the page outside the displayport.
-When this happens, they see unpainted content; this is referred to as "checkerboarding", and we try to avoid it where possible.
-
-There are many possible ways to determine what the displayport should be in order to balance the tradeoffs involved (i.e. having one that is too big is bad for memory usage, and having one that is too small results in excessive checkerboarding).
-Ideally, the displayport should cover exactly the area that we know the user will make visible.
-Although we cannot know this for sure, we can use heuristics based on current panning velocity and direction to ensure a reasonably-chosen displayport area.
-This calculation is done in the APZ code, and a new desired displayport is frequently sent to the main thread as the user is panning around.
-
-### Multiple layers
-
-Consider, for example, a scrollable page that contains an iframe which itself is scrollable.
-The iframe can be scrolled independently of the top-level page, and we would like both the page and the iframe to scroll responsively.
-This means that we want independent asynchronous panning for both the top-level page and the iframe.
-In addition to iframes, elements that have the overflow:scroll CSS property set are also scrollable, and also end up on separate scrollable layers.
-In the general case, the layers are arranged in a tree structure, and so within the APZ code we have a matching tree of AsyncPanZoomController (APZC) objects, one for each scrollable layer.
-To manage this tree of APZC instances, we have a single APZCTreeManager object.
-Each APZC is relatively independent and handles the scrolling for its associated layer, but there are some cases in which they need to interact; these cases are described in the sections below.
-
-### Hit detection
-
-Consider again the case where we have a scrollable page that contains an iframe which itself is scrollable.
-As described above, we will have two APZC instances - one for the page and one for the iframe.
-When the user puts their finger down on the screen and moves it, we need to do some sort of hit detection in order to determine whether their finger is on the iframe or on the top-level page.
-Based on where their finger lands, the appropriate APZC instance needs to handle the input.
-This hit detection is also done in the APZCTreeManager, as it has the necessary information about the sizes and positions of the layers.
-Currently this hit detection is not perfect, as it uses rects and does not account for things like rounded corners and opacity.
-
-Also note that for some types of input (e.g. when the user puts two fingers down to do a pinch) we do not want the input to be "split" across two different APZC instances.
-In the case of a pinch, for example, we find a "common ancestor" APZC instance - one that is zoomable and contains all of the touch input points, and direct the input to that APZC instance.
-
-### Scroll Handoff
-
-Consider yet again the case where we have a scrollable page that contains an iframe which itself is scrollable.
-Say the user scrolls the iframe so that it reaches the bottom.
-If the user continues panning on the iframe, the expectation is that the top-level page will start scrolling.
-However, as discussed in the section on hit detection, the APZC instance for the iframe is separate from the APZC instance for the top-level page.
-Thus, we need the two APZC instances to communicate in some way such that input events on the iframe result in scrolling on the top-level page.
-This behaviour is referred to as "scroll handoff" (or "fling handoff" in the case where analogous behaviour results from the scrolling momentum of the page after the user has lifted their finger).
-
-### Input event untransformation
-
-The APZC architecture by definition results in two copies of a "scroll position" for each scrollable layer.
-There is the original copy on the main thread that is accessible to web content and the layout and painting code.
-And there is a second copy on the compositor side, which is updated asynchronously based on user input, and corresponds to what the user visually sees on the screen.
-Although these two copies may diverge temporarily, they are reconciled periodically.
-In particular, they diverge while the APZ code is performing an async pan or zoom action on behalf of the user, and are reconciled when the APZ code requests a repaint from the main thread.
-
-Because of the way input events are stored, this has some unfortunate consequences.
-Input events are stored relative to the device screen - so if the user touches at the same physical spot on the device, the same input events will be delivered regardless of the content scroll position.
-When the main thread receives a touch event, it combines that with the content scroll position in order to figure out what DOM element the user touched.
-However, because we now have two different scroll positions, this process may not work perfectly.
-A concrete example follows:
-
-Consider a device with screen size 600 pixels tall.
-On this device, a user is viewing a document that is 1000 pixels tall, and that is scrolled down by 200 pixels.
-That is, the vertical section of the document from 200px to 800px is visible.
-Now, if the user touches a point 100px from the top of the physical display, the hardware will generate a touch event with y=100.
-This will get sent to the main thread, which will add the scroll position (200) and get a document-relative touch event with y=300.
-This new y-value will be used in hit detection to figure out what the user touched.
-If the document had a absolute-positioned div at y=300, then that would receive the touch event.
-
-Now let us add some async scrolling to this example.
-Say that the user additionally scrolls the document by another 10 pixels asynchronously (i.e. only on the compositor thread), and then does the same touch event.
-The same input event is generated by the hardware, and as before, the document will deliver the touch event to the div at y=300.
-However, visually, the document is scrolled by an additional 10 pixels so this outcome is wrong.
-What needs to happen is that the APZ code needs to intercept the touch event and account for the 10 pixels of asynchronous scroll.
-Therefore, the input event with y=100 gets converted to y=110 in the APZ code before being passed on to the main thread.
-The main thread then adds the scroll position it knows about and determines that the user touched at a document-relative position of y=310.
-
-Analogous input event transformations need to be done for horizontal scrolling and zooming.
-
-### Content independently adjusting scrolling
-
-As described above, there are two copies of the scroll position in the APZ architecture - one on the main thread and one on the compositor thread.
-Usually for architectures like this, there is a single "source of truth" value and the other value is simply a copy.
-However, in this case that is not easily possible to do.
-The reason is that both of these values can be legitimately modified.
-On the compositor side, the input events the user is triggering modify the scroll position, which is then propagated to the main thread.
-However, on the main thread, web content might be running Javascript code that programatically sets the scroll position (via window.scrollTo, for example).
-Scroll changes driven from the main thread are just as legitimate and need to be propagated to the compositor thread, so that the visual display updates in response.
-
-Because the cross-thread messaging is asynchronous, reconciling the two types of scroll changes is a tricky problem.
-Our design solves this using various flags and generation counters.
-The general heuristic we have is that content-driven scroll position changes (e.g. scrollTo from JS) are never lost.
-For instance, if the user is doing an async scroll with their finger and content does a scrollTo in the middle, then some of the async scroll would occur before the "jump" and the rest after the "jump".
-
-### Content preventing default behaviour of input events
-
-Another problem that we need to deal with is that web content is allowed to intercept touch events and prevent the "default behaviour" of scrolling.
-This ability is defined in web standards and is non-negotiable.
-Touch event listeners in web content are allowed call preventDefault() on the touchstart or first touchmove event for a touch point; doing this is supposed to "consume" the event and prevent touch-based panning.
-As we saw in a previous section, the input event needs to be untransformed by the APZ code before it can be delivered to content.
-But, because of the preventDefault problem, we cannot fully process the touch event in the APZ code until content has had a chance to handle it.
-Web browsers in general solve this problem by inserting a delay of up to 300ms before processing the input - that is, web content is allowed up to 300ms to process the event and call preventDefault on it.
-If web content takes longer than 300ms, or if it completes handling of the event without calling preventDefault, then the browser immediately starts processing the events.
-
-The way the APZ implementation deals with this is that upon receiving a touch event, it immediately returns an untransformed version that can be dispatched to content.
-It also schedules a 400ms timeout (600ms on Android) during which content is allowed to prevent scrolling.
-There is an API that allows the main-thread event dispatching code to notify the APZ as to whether or not the default action should be prevented.
-If the APZ content response timeout expires, or if the main-thread event dispatching code notifies the APZ of the preventDefault status, then the APZ continues with the processing of the events (which may involve discarding the events).
-
-The touch-action CSS property from the pointer-events spec is intended to allow eliminating this 400ms delay in many cases (although for backwards compatibility it will still be needed for a while).
-Note that even with touch-action implemented, there may be cases where the APZ code does not know the touch-action behaviour of the point the user touched.
-In such cases, the APZ code will still wait up to 400ms for the main thread to provide it with the touch-action behaviour information.
-
-## Technical details
-
-This section describes various pieces of the APZ code, and goes into more specific detail on APIs and code than the previous sections.
-The primary purpose of this section is to help people who plan on making changes to the code, while also not going into so much detail that it needs to be updated with every patch.
-
-### Overall flow of input events
-
-This section describes how input events flow through the APZ code.
-<ol>
-<li value="1">
-Input events arrive from the hardware/widget code into the APZ via APZCTreeManager::ReceiveInputEvent.
-The thread that invokes this is called the input thread, and may or may not be the same as the Gecko main thread.
-</li>
-<li value="2">
-Conceptually the first thing that the APZCTreeManager does is to associate these events with "input blocks".
-An input block is a set of events that share certain properties, and generally are intended to represent a single gesture.
-For example with touch events, all events following a touchstart up to but not including the next touchstart are in the same block.
-All of the events in a given block will go to the same APZC instance and will either all be processed or all be dropped.
-</li>
-<li value="3">
-Using the first event in the input block, the APZCTreeManager does a hit-test to see which APZC it hits.
-This hit-test uses the event regions populated on the layers, which may be larger than the true hit area of the layer.
-If no APZC is hit, the events are discarded and we jump to step 6.
-Otherwise, the input block is tagged with the hit APZC as a tentative target and put into a global APZ input queue.
-</li>
-<li value="4">
- <ol>
-  <li value="i">
-   If the input events landed outside the dispatch-to-content event region for the layer, any available events in the input block are processed.
-   These may trigger behaviours like scrolling or tap gestures.
-  </li>
-  <li value="ii">
-   If the input events landed inside the dispatch-to-content event region for the layer, the events are left in the queue and a 400ms timeout is initiated.
-   If the timeout expires before step 9 is completed, the APZ assumes the input block was not cancelled and the tentative target is correct, and processes them as part of step 10.
-  </li>
- </ol>
-</li>
-<li value="5">
-The call stack unwinds back to APZCTreeManager::ReceiveInputEvent, which does an in-place modification of the input event so that any async transforms are removed.
-</li>
-<li value="6">
-The call stack unwinds back to the widget code that called ReceiveInputEvent.
-This code now has the event in the coordinate space Gecko is expecting, and so can dispatch it to the Gecko main thread.
-</li>
-<li value="7">
-Gecko performs its own usual hit-testing and event dispatching for the event.
-As part of this, it records whether any touch listeners cancelled the input block by calling preventDefault().
-It also activates inactive scrollframes that were hit by the input events.
-</li>
-<li value="8">
-The call stack unwinds back to the widget code, which sends two notifications to the APZ code on the input thread.
-The first notification is via APZCTreeManager::ContentReceivedInputBlock, and informs the APZ whether the input block was cancelled.
-The second notification is via APZCTreeManager::SetTargetAPZC, and informs the APZ of the results of the Gecko hit-test during event dispatch.
-Note that Gecko may report that the input event did not hit any scrollable frame at all.
-The SetTargetAPZC notification happens only once per input block, while the ContentReceivedInputBlock notification may happen once per block, or multiple times per block, depending on the input type.
-</li>
-<li value="9">
- <ol>
-  <li value="i">
-   If the events were processed as part of step 4(i), the notifications from step 8 are ignored and step 10 is skipped.
-  </li>
-  <li value="ii">
-   If events were queued as part of step 4(ii), and steps 5-8 take less than 400ms, the arrival of both notifications from step 8 will mark the input block ready for processing.
-  </li>
-  <li value="iii">
-   If events were queued as part of step 4(ii), but steps 5-8 take longer than 400ms, the notifications from step 8 will be ignored and step 10 will already have happened.
-  </li>
- </ol>
-</li>
-<li value="10">
-If events were queued as part of step 4(ii) they are now either processed (if the input block was not cancelled and Gecko detected a scrollframe under the input event, or if the timeout expired) or dropped (all other cases).
-Note that the APZC that processes the events may be different at this step than the tentative target from step 3, depending on the SetTargetAPZC notification.
-Processing the events may trigger behaviours like scrolling or tap gestures.
-</li>
-</ol>
-
-If the CSS touch-action property is enabled, the above steps are modified as follows:
-<ul>
-<li>
- In step 4, the APZC also requires the allowed touch-action behaviours for the input event.
- This might have been determined as part of the hit-test in APZCTreeManager; if not, the events are queued.
-</li>
-<li>
- In step 6, the widget code determines the content element at the point under the input element, and notifies the APZ code of the allowed touch-action behaviours.
- This notification is sent via a call to APZCTreeManager::SetAllowedTouchBehavior on the input thread.
-</li>
-<li>
- In step 9(ii), the input block will only be marked ready for processing once all three notifications arrive.
-</li>
-</ul>
-
-#### Threading considerations
-
-The bulk of the input processing in the APZ code happens on what we call "the input thread".
-In practice the input thread could be the Gecko main thread, the compositor thread, or some other thread.
-There are obvious downsides to using the Gecko main thread - that is, "asynchronous" panning and zooming is not really asynchronous as input events can only be processed while Gecko is idle.
-In an e10s environment, using the Gecko main thread of the chrome process is acceptable, because the code running in that process is more controllable and well-behaved than arbitrary web content.
-Using the compositor thread as the input thread could work on some platforms, but may be inefficient on others.
-For example, on Android (Fennec) we receive input events from the system on a dedicated UI thread.
-We would have to redispatch the input events to the compositor thread if we wanted the input thread to be the same as the compositor thread.
-This introduces a potential for higher latency, particularly if the compositor does any blocking operations - blocking SwapBuffers operations, for example.
-As a result, the APZ code itself does not assume that the input thread will be the same as the Gecko main thread or the compositor thread.
-
-#### Active vs. inactive scrollframes
-
-The number of scrollframes on a page is potentially unbounded.
-However, we do not want to create a separate layer for each scrollframe right away, as this would require large amounts of memory.
-Therefore, scrollframes are designated as either "active" or "inactive".
-Active scrollframes are the ones that do have their contents put on a separate layer (or set of layers), and inactive ones do not.
-
-Consider a page with a scrollframe that is initially inactive.
-When layout generates the layers for this page, the content of the scrollframe will be flattened into some other PaintedLayer (call it P).
-The layout code also adds the area (or bounding region in case of weird shapes) of the scrollframe to the dispatch-to-content region of P.
-
-When the user starts interacting with that content, the hit-test in the APZ code finds the dispatch-to-content region of P.
-The input block therefore has a tentative target of P when it goes into step 4(ii) in the flow above.
-When gecko processes the input event, it must detect the inactive scrollframe and activate it, as part of step 7.
-Finally, the widget code sends the SetTargetAPZC notification in step 8 to notify the APZ that the input block should really apply to this new layer.
-The issue here is that the layer transaction containing the new layer must reach the compositor and APZ before the SetTargetAPZC notification.
-If this does not occur within the 400ms timeout, the APZ code will be unable to update the tentative target, and will continue to use P for that input block.
-Input blocks that start after the layer transaction will get correctly routed to the new layer as there will now be a layer and APZC instance for the active scrollframe.
-
-This model implies that when the user initially attempts to scroll an inactive scrollframe, it may end up scrolling an ancestor scrollframe.
-(This is because in the absence of the SetTargetAPZC notification, the input events will get applied to the closest ancestor scrollframe's APZC.)
-Only after the round-trip to the gecko thread is complete is there a layer for async scrolling to actually occur on the scrollframe itself.
-At that point the scrollframe will start receiving new input blocks and will scroll normally.
deleted file mode 100644
--- a/gfx/doc/GraphicsOverview.md
+++ /dev/null
@@ -1,83 +0,0 @@
-Mozilla Graphics Overview {#graphicsoverview}
-=================
-## Work in progress.  Possibly incorrect or incomplete.
-
-Overview
---------
-The graphics system is responsible for rendering (painting, drawing) the frame tree (rendering tree) elements as created by the layout system.  Each leaf in the tree has content, either bounded by a rectangle (or perhaps another shape, in the case of SVG.)
-
-The simple approach for producing the result would thus involve traversing the frame tree, in a correct order, drawing each frame into the resulting buffer and displaying (printing notwithstanding) that buffer when the traversal is done. It is worth spending some time on the "correct order" note above.  If there are no overlapping frames, this is fairly simple - any order will do, as long as there is no background.  If there is background, we just have to worry about drawing that first. Since we do not control the content, chances are the page is more complicated.  There are overlapping frames, likely with transparency, so we need to make sure the elements are drawn "back to front", in layers, so to speak.  Layers are an important concept, and we will revisit them shortly, as they are central to fixing a major issue with the above simple approach.
-
-While the above simple approach will work, the performance will suffer.  Each time anything changes in any of the frames, the complete process needs to be repeated, everything needs to be redrawn.  Further, there is very little space to take advantage of the modern graphics (GPU) hardware, or multi-core computers.  If you recall from the previous sections, the frame tree is only accessible from the UI thread, so while we're doing all this work, the UI is basically blocked.
-
-### (Retained) Layers
-
-Layers framework was introduced to address the above performance issues, by having a part of the design address each item. At the high level:
-
-1. We create a layer tree. The leaf elements of the tree contain all frames (possibly multiple frames per leaf).
-2. We render each layer tree element and cache (retain) the result.
-3. We composite (combine) all the leaf elements into the final result.
-
-Let's examine each of these steps, in reverse order.
-
-### Compositing
-We use the term composite as it implies that the order is important.  If the elements being composited overlap, whether there is transparency involved or not, the order in which they are combined will affect the result.
-Compositing is where we can use some of the power of the modern graphics hardware.  It is optimal for doing this job. In the scenarios where only the position of individual frames changes, without the content inside them changing, we see why caching each layer would be advantageous - we only need to repeat the final compositing step, completely skipping the layer tree creation and the rendering of each leaf, thus speeding up the process considerably.
-
-Another benefit is equally apparent in the context of the stated deficiencies of the simple approach. We can use the available graphics hardware accelerated APIs to do the compositing step.  Direct3D, OpenGL can be used on different platforms and are well suited to accelerate this step.
-
-Finally, we can now envision performing the compositing step on a separate thread, unblocking the UI thread for other work, and doing more work in parallel.  More on this below.
-
-It is important to note that the number of operations in this step is proportional to the number of layer tree (leaf) elements, so there is additional work and complexity involved, when the layer tree is large.
-
-#### Render and retain layer elements
-As we saw, the compositing step benefits from caching the intermediate result.  This does result in the extra memory usage, so needs to be considered during the layer tree creation. Beyond the caching, we can accelerate the rendering of each element by (indirectly) using the available platform APIs (e.g., Direct2D, CoreGraphics, even some of the 3D APIs like OpenGL or Direct3D) as available.  This is actually done through a platform independent API (see Moz2D) below, but is important to realize it does get accelerated appropriately.
-
-#### Creating the layer tree
-We need to create a layer tree (from the frames tree), which will give us the correct result while striking the right balance between a layer per frame element and a single layer for the complete frames tree.  As was mentioned above, there is an overhead in traversing the whole tree and caching each of the elements, balanced by the performance improvements.  Some of the performance improvements are only noticed when something changes (e.g., one element is moving, we only need to redo the compositing step).
-
-### Refresh Driver
-
-### Layers
-
-#### Rendering each layer
-
-### Tiling vs. Buffer Rotation vs. Full paint
-
-#### Compositing for the final result
-
-### Graphics API
-
-#### Moz2D
-* The Moz2D graphics API, part of the Azure project, is a cross-platform interface onto the various graphics backends that Gecko uses for rendering such as Direct2D (1.0 and 1.1), Skia, Cairo, Quartz, and NV Path. Adding a new graphics platform to Gecko is accomplished by adding a backend to Moz2D.
-\see [Moz2D documentation on wiki](https://wiki.mozilla.org/Platform/GFX/Moz2D)
-
-#### Compositing
-
-#### Image Decoding
-
-#### Image Animation
-
-### Funny words
-There are a lot of code words that we use to refer to projects, libraries, areas of the code.  Here's an attempt to cover some of those:
-* Azure - See Moz2D in the Graphics API section above.
-* Backend - See Moz2D in the Graphics API section above.
-* Cairo - http://www.cairographics.org/.  Cairo is a 2D graphics library with support for multiple output devices. Currently supported output targets include the X Window System (via both Xlib and XCB), Quartz, Win32, image buffers, PostScript, PDF, and SVG file output. 
-* Moz2D - See Moz2D in the Graphics API section above.
-* Thebes - Graphics API that preceded Moz2D.
-* Reflow
-* Display list
-
-### [Historical Documents](http://www.youtube.com/watch?v=lLZQz26-kms)
-A number of posts and blogs that will give you more details or more background, or reasoning that led to different solutions and approaches.
-
-* 2010-01 [Layers: Cross Platform Acceleration] (http://www.basschouten.com/blog1.php/layers-cross-platform-acceleration) 
-* 2010-04 [Layers] (http://robert.ocallahan.org/2010/04/layers_01.html)
-* 2010-07 [Retained Layers](http://robert.ocallahan.org/2010/07/retained-layers_16.html)
-* 2011-04 [Introduction](https://blog.mozilla.org/joe/2011/04/26/introducing-the-azure-project/ Moz2D)
-* 2011-07 [Layers](http://chrislord.net/index.php/2011/07/25/shadow-layers-and-learning-by-failing/ Shadow)
-* 2011-09 [Graphics API Design](http://robert.ocallahan.org/2011/09/graphics-api-design.html)
-* 2012-04 [Moz2D Canvas on OSX](http://muizelaar.blogspot.ca/2012/04/azure-canvas-on-os-x.html)
-* 2012-05 [Mask Layers](http://featherweightmusings.blogspot.co.uk/2012/05/mask-layers_26.html)
-* 2013-07 [Graphics related](http://www.basschouten.com/blog1.php)
-
deleted file mode 100644
--- a/gfx/doc/LayersHistory.md
+++ /dev/null
@@ -1,60 +0,0 @@
-This is an overview of the major events in the history of our Layers infrastructure.
-
-- iPhone released in July 2007 (Built on a toolkit called LayerKit)
-
-- Core Animation (October 2007): LayerKit was publicly renamed to Core Animation in OS X 10.5
-
-- Webkit CSS 3d transforms (July 2009)
-
-- Original layers API (March 2010) Introduced the idea of a layer manager that
-  would composite. One of the first use cases for this was hardware accelerated
-  YUV conversion for video.
-
-- Retained layers (July 7 2010 - Bug 564991)
-This was an important concept that introduced the idea of persisting the layer
-content across paints in gecko controlled buffers instead of just by the OS. This introduced
-the concept of buffer rotation to deal with scrolling instead of using the
-native scrolling APIs like ScrollWindowEx
-
-- Layers IPC (July 2010 - Bug 570294)
-This introduced shadow layers and edit lists and was originally done for e10s v1
-
-- 3d transforms (September 2011 - Bug 505115)
-
-- OMTC (December 2011 - Bug 711168)
-This was prototyped on OS X but shipped first for Fennec
-
-- Tiling v1 (April 2012 - Bug 739679)
-Originally done for Fennec.
-This was done to avoid situations where we had to do a bunch of work for
-scrolling a small amount. i.e. buffer rotation.  It allowed us to have a
-variety of interesting features like progressive painting and lower resolution
-painting.
-
-- C++ Async pan zoom controller (July 2012 - Bug 750974)
-The existing APZ code was in Java for Fennec so this was reimplemented.
-
-- Streaming WebGL Buffers (February 2013 - Bug 716859)
-Infrastructure to allow OMTC WebGL and avoid the need to glFinish() every
-frame.
-
-- Compositor API (April 2013 - Bug 825928)
-The planning for this started around November 2012.
-Layers refactoring created a compositor API that abstracted away the differences between the
-D3D vs OpenGL. The main piece of API is DrawQuad.
-
-- Tiling v2 (Mar 7 2014 - Bug 963073)
-Tiling for B2G. This work is mainly porting tiled layers to new textures,
-implementing double-buffered tiles and implementing a texture client pool, to
-be used by tiled content clients.
-
- A large motivation for the pool was the very slow performance of allocating tiles because
-of the sync messages to the compositor.
-
- The slow performance of allocating was directly addressed by bug 959089 which allowed us
-to allocate gralloc buffers without sync messages to the compositor thread.
-
-- B2G WebGL performance (May 2014 - Bug 1006957, 1001417, 1024144)
-This work improved the synchronization mechanism between the compositor
-and the producer.
-
deleted file mode 100644
--- a/gfx/doc/MainPage.md
+++ /dev/null
@@ -1,21 +0,0 @@
-Mozilla Graphics {#mainpage}
-======================
-
-## Work in progress.  Possibly incorrect or incomplete.
-
-
-Introduction
--------
-This collection of linked pages contains a combination of Doxygen
-extracted source code documentation and design documents for the
-Mozilla graphics architecture.  The design documents live in gfx/docs directory.
-
-This [wiki page](https://wiki.mozilla.org/Platform/GFX) contains
-information about graphics and the graphics team at MoCo.
-
-Continue here for a [very high level introductory overview](@ref graphicsoverview)
-if you don't know where to start.
-
-Useful pointers for creating documentation
-------
-[The mechanics of creating these files](https://wiki.mozilla.org/Platform/GFX/DesignDocumentationGuidelines)
deleted file mode 100644
--- a/gfx/doc/MozSurface.md
+++ /dev/null
@@ -1,124 +0,0 @@
-MozSurface {#mozsurface}
-==========
-
-**This document is work in progress.  Some information may be missing or incomplete.**
-
-## Goals
-
-We need to be able to safely and efficiently render web content into surfaces that may be shared across processes.
-MozSurface is a cross-process and backend-independent Surface API and not a stream API.
-
-## Owner
-
-Nicolas Silva
-
-## Definitions
-
-## Use cases
-
-Drawing web content into a surface and share it with the compositor process to display it on the screen without copies.
-
-## Requirement
-
-* It must be possible to efficiently share a MozSurface with a separate thread or process through IPDL
-* It must be possible to obtain read access a MozSurface on both the client and the host side at the same time.
-* The creation, update and destruction of surfaces must be safe and race-free. In particular, the ownership of the shared data must be clearly defined.
-* MozSurface must be a cross-backend/cross-platform abstraction that we will use on all of the supported platforms.
-* It must be possible to efficiently draw into a MozSurface using Moz2D.
-* While it should be possible to share MozSurfaces across processes, it should not be limited to that. MozSurface should also be the preferred abstraction for use with surfaces that are not shared with the compositor process.
-
-## TextureClient and TextureHost
-
-TextureClient and TextureHost are the closest abstractions we currently have to MozSurface. The current plan is to evolve TextureClient into MozSurface. In its current state, TextureClient doesn't meet all the requirements and design decisions of MozSurface yet.
-
-In particular, TextureClient/TextureHost are designed around cross-process sharing specifically. See the SharedMozSurface design document for more information about TextureClient and TextureHost.
-
-## Locking semantics
-
-In order to access the shared surface data users of MozSurface must acquire and release a lock on the surface, specifying the open mode (read/write/read+write).
-
-    bool Lock(OpenMode aMode);
-    void Unlock();
-
-This locking API has two purposes:
-
-* Ensure that access to the shared data is race-free.
-* Let the implementation do whatever is necessary for the user to have access to the data. For example it can be mapping and unmapping the surface data in memory if the underlying backend requires it.
-
-The lock is expected to behave as a cross-process blocking read/write lock that is not reentrant.
-
-## Immutable surfaces
-
-In some cases we know in advance that a surface will not be modified after it has been shared. This is for example true for video frames. In this case the surface can be marked as immutable and the underlying implementation doesn't need to hold an actual blocking lock on the shared data.
-Trying to acquire a write lock on a MozSurface that is marked as immutable and already shared must fail (return false).
-Note that it is still required to use the Lock/Unlock API to read the data, in order for the implementation to be able to properly map and unmap the memory. This is just an optimization and a safety check.
-
-## Drawing into a surface
-
-In most cases we want to be able to paint directly into a surface through the Moz2D API.
-
-A surface lets you *borrow* a DrawTarget that is only valid between Lock and Unlock.
-
-    DrawTarget* GetAsDrawTarget();
-
-It is invalid to hold a reference to the DrawTarget after Unlock, and a different DrawTarget may be obtained during the next Lock/Unlock interval.
-
-In some cases we want to use MozSurface without drawing into it. For instance to share video frames across processes. Some surface types may also not be accessible through a DrawTarget (for example YCbCr surfaces).
-
-    bool CanExposeDrawTarget();
-
-helps with making sure that a Surface supports exposing a Moz2D DrawTarget.
-
-## Using a MozSurface as a source for Compositing
-
-To interface with the Compositor API, MozSurface gives access to TextureSource objects. TextureSource is the cross-backend representation of a texture that Compositor understands.
-While MozSurface handles memory management of (potentially shared) texture data, TextureSource is only an abstraction for Compositing.
-
-## Fence synchronization
-
-TODO: We need to figure this out. Right now we have a Gonk specific implementation, but no cross-platform abstraction/design.
-
-## Ownership of the shared data
-
-MozSurface (TextureClient/TextureHost in its current form) defines ownership rules that depend on the configuration of the surface, in order to satisfy efficiency and safety requirements.
-
-These rules rely on the fact that the underlying shared data is strictly owned by the MozSurface. This means that keeping direct references to the shared data is illegal and unsafe.
-
-## Internal buffers / direct texturing
-
-Some MozSurface implementations use CPU-side shared memory to share the texture data across processes, and require a GPU texture upload when interfacing with a TextureSource. In this case we say that the surface has an internal buffer (because it is implicitly equivalent to double buffering where the shared data is the back buffer and the GPU side texture is the front buffer). We also say that it doesn't do "direct texturing" meaning that we don't draw directly into the GPU-side texture.
-
-Examples:
-
- * Shmem MozSurface + OpenGL TextureSource: Has an internal buffer (no direct texturing)
- * Gralloc MozSurface + Gralloc TextureSource: No internal buffer (direct texturing)
-
-While direct texturing is usually the most efficient way, it is not always available depending on the platform and the required allocation size or format. Textures with internal buffers have less restrictions around locking since the host side will only need to read from the MozSurface once per update, meaning that we can often get away with single buffering where we would need double buffering with direct texturing.
-
-## Alternative solutions
-
-## Backends
-
-We have MozSurface implementations (classes inheriting from TextureClient/TextureHost) for OpenGL, Software, D3D9, and D3D11 backends.
-Some implementations can be used with any backend (ex. ShmemTextureClient/Host).
-
-## Users of MozSurface
-
-MozSurface is the mechanism used by layers to share surfaces with the compositor, but it is not limited to layers. It should be used by anything that draws into a surface that may be shared with the compositor thread.
-
-## Testing
-
-TODO - How can we make MozSurface more testable and what should we test?
-
-## Future work
-
-### Using a MozSurface as a source for Drawing
-
-MozSurface should be able to expose a borrowed Moz2D SourceSurface that is valid between Lock and Unlock similarly to how it exposes a DrawTarget.
-
-## Comparison with other APIs
-
-MozSurface is somewhat equivalent to Gralloc on Android/Gonk: it is a reference counted cross-process surface with locking semantics. While Gralloc can interface itself with OpenGL textures for compositing, MozSurface can interface itself to TextureSource objects.
-
-MozSurface should not be confused with higher level APIs such as EGLStream. A swap-chain API like EGLStream can be implemented on top of MozSurface, but MozSurface's purpose is to define and manage the memory and resources of shared texture data.
-
deleted file mode 100644
--- a/gfx/doc/SharedMozSurface.md
+++ /dev/null
@@ -1,147 +0,0 @@
-Shared MozSurface {#mozsurface}
-==========
-
-**This document is work in progress.  Some information may be missing or incomplete.**
-
-Shared MozSurfaces represent an important use case of MozSurface, anything that is in the MozSurface design document also applies to shared MozSurfaces.
-
-## Goals
-
-We need to be able to safely and efficiently render web content into surfaces that may be shared across processes.
-MozSurface is a cross-process and backend-independent Surface API and not a stream API.
-
-## Owner
-
-Nicolas Silva
-
-## Definitions
-
-* Client and Host: In Gecko's compositing architecture, the client process is the producer, while the host process is the consumer side, where compositing takes place.
-
-## Use cases
-
-Drawing web content into a surface and share it with the compositor process to display it on the screen without copies.
-
-## Requirement
-
-Shared MozSurfaces represent an important use case of MozSurface, it has the same requirements as MozSurface.
-
-## TextureClient and TextureHost
-
-TextureClient and TextureHost are the closest abstractions we currently have to MozSurface.
-Inline documentation about TextureClient and TextureHost can be found in:
-
-* [gfx/layers/client/TextureClient.h](http://dxr.mozilla.org/mozilla-central/source/gfx/layers/client/TextureClient.h)
-* [gfx/layers/composite/TextureHost.h](http://dxr.mozilla.org/mozilla-central/source/gfx/layers/composite/TextureHost.h)
-
-TextureClient is the client-side handle on a MozSurface, while TextureHost is the equivalent host-side representation. There can only be one TextureClient for a given TextureHost, and one TextureHost for a given TextureClient. Likewise, there can only be one shared object for a given TextureClient/TextureHost pair.
-
-A MozSurface containing data that is shared between a client process and a host process exists in the following form:
-
-```
-                                 .
-            Client process       .      Host process
-                                 .
-     ________________      ______________      ______________
-    |                |    |              |    |              |
-    | TextureClient  +----+ <SharedData> +----+ TextureHost  |
-    |________________|    |______________|    |______________|
-                                 .
-                                 .
-                                 .
-    Figure 1) A Surface as seen by the client and the host processes
-```
-
-The above figure is a logical representation, not a class diagram.
-`<SharedData>` is a placeholder for whichever platform specific surface type we are sharing, for example a Gralloc buffer on Gonk or a D3D11 texture on Windows.
-
-## Deallocation protocol
-
-The shared data is accessible by both the client-side and the host-side of the MozSurface. A deallocation protocol must be defined to handle which side deallocates the data, and to ensure that it doesn't cause any race condition.
-The client side, which contains the web content's logic, always "decides" when a surface is needed or not. So the life time of a MozSurface is driven by the reference count of its client-side handle (TextureClient).
-When a TextureClient's reference count reaches zero, a "Remove" message is sent in order to let the host side know that the shared data is not accessible on the client side and that it is safe for it to be deleted. The host side responds with a "Delete" message.
-
-
-```
-           client side                .         host side
-                                      .
-    (A) Client: Send Remove     -.    .
-                                  \   .
-                                   \  .   ... can receive and send ...
-                                    \
-        Can receive                  `--> (B) Host: Receive Remove
-        Can't send                         |
-                                      .-- (C) Host: Send Delete
-                                     /
-                                    / .   ... can't receive nor send ...
-                                   /  .
-    (D) Client: Receive Delete <--'   .
-                                      .
-    Figure 2) MozSurface deallocation handshake
-```
-
-This handshake protocol is twofold:
-
-* It defines where and when it is possible to deallocate the shared data without races
-* It makes it impossible for asynchronous messages to race with the destruction of the MozSurface.
-
-### Deallocating on the host side
-
-In the common case, the shared data is deallocated asynchronously on the host side. In this case the deallocation takes place at the point (C) of figure 2.
-
-### Deallocating on the client side
-
-In some rare cases, for instance if the underlying implementation requires it, the shared data must be deallocated on the client side. In such cases, deallocation happens at the point (D) of figure 2.
-
-In some exceptional cases, this needs to happen synchronously, meaning that the client-side thread will block until the Delete message is received. This is supported but it is terrible for performance, so it should be avoided as much as possible.
-Currently this is needed when shutting down a hardware-decoded video stream with libstagefright on Gonk, because the libstagefright unfortunately assumes it has full ownership over the shared data (gralloc buffers) and crashes if there are still users of the buffers.
-
-### Sharing state
-
-The above deallocation protocol of a MozSurface applies to the common case that is when the surface is shared between two processes. A Surface can also be deallocated while it is not shared.
-
-The sharing state of a MozSurface can be one of the following:
-
-* (1) Uninitialized (it doesn't have any shared data)
-* (2) Local (it isn't shared with another thread/process)
-* (3) Shared (the state you would expect it to be most of the time)
-* (4) Invalid (when for some rare cases we needed to force the deallocation of the shared data before the destruction of the TextureClient object).
-
-Surfaces can move from state N to state N+1 and be deallocated in any of these states. It could be possible to move from Shared to Local, but we currently don't have a use case for it.
-
-The deallocation protocol above, applies to the Shared state (3).
-In the other cases:
-
-* (1) Uninitialized: There is nothing to do.
-* (2) Local: The shared data is deallocated by the client side without need for a handshake, since it is not shared with other threads.
-* (4) Invalid: There is nothing to do (deallocation has already happened).
-
-## Alternative solutions
-
-### Sending ownership back and forth between the client and host sides through message passing, instead of sharing.
-
-The current design of MozSurface makes the surface accessible from both sides at the same time, forcing us to do Locking and have a hand shake around deallocating the shared data, while using pure message passing and making the surface accessible only from one side at a time would avoid these complications.
-
-Using pure message passing was actually the first approach we tried when we created the first version of TextureClient and TextureHost. This strategy failed in several places, partly because of some legacy in Gecko's architecture, and partly because of some of the optimizations we do to avoid copying surfaces.
-
-We need a given surface to be accessible on both the client and host for the following reasons:
-
-* Gecko can at any time require read access on the client side to a surface that is shared with the host process, for example to build a temporary layer manager and generate a screenshot. This is mostly a legacy problem.
-* We do some copy-on-write optimizations on surfaces that are shared with the compositor in order to keep invalid regions as small as possible. Our tiling implementation is an example of that.
-* Our buffer rotation code on scrollable non-tiled layers also requires a synchronization on the client side between the front and back buffers, while the front buffer is used on the host side.
-
-## Testing
-
-TODO - How can we make shared MozSurfaces more testable and what should we test?
-
-## Future work
-
-### Rename TextureClient/TextureHost
-
-The current terminology is very confusing.
-
-### Unify TextureClient and TextureHost
-
-TextureClient and TextureHost should live under a common interface to better hide the IPC details. The base class should only expose the non-ipc related methods such as Locking, access through a DrawTarget, access to a TextureSource.
-
-## Comparison with other APIs
deleted file mode 100644
--- a/gfx/doc/Silk.md
+++ /dev/null
@@ -1,246 +0,0 @@
-Silk Architecture Overview
-=================
-
-#Architecture
-Our current architecture is to align three components to hardware vsync timers:
-
-1. Compositor
-2. RefreshDriver / Painting
-3. Input Events
-
-The flow of our rendering engine is as follows:
-
-1. Hardware Vsync event occurs on an OS specific *Hardware Vsync Thread* on a per monitor basis.
-2. The *Hardware Vsync Thread* attached to the monitor notifies the **CompositorVsyncDispatchers** and **RefreshTimerVsyncDispatcher**.
-3. For every Firefox window on the specific monitor, notify a **CompositorVsyncDispatcher**. The **CompositorVsyncDispatcher** is specific to one window.
-4. The **CompositorVsyncDispatcher** notifies a **CompositorWidgetVsyncObserver** when remote compositing, or a **CompositorVsyncScheduler::Observer** when compositing in-process.
-5. If remote compositing, a vsync notification is sent from the **CompositorWidgetVsyncObserver** to the **VsyncBridgeChild** on the UI process, which sends an IPDL message to the **VsyncBridgeParent** on the compositor thread of the GPU process, which then dispatches to **CompositorVsyncScheduler::Observer**.
-6. The **RefreshTimerVsyncDispatcher** notifies the Chrome **RefreshTimer** that a vsync has occured.
-7. The **RefreshTimerVsyncDispatcher** sends IPC messages to all content processes to tick their respective active **RefreshTimer**.
-8. The **Compositor** dispatches input events on the *Compositor Thread*, then composites. Input events are only dispatched on the *Compositor Thread* on b2g.
-9. The **RefreshDriver** paints on the *Main Thread*.
-
-The implementation is broken into the following sections and will reference this figure. Note that **Objects** are bold fonts while *Threads* are italicized.
-
-<img src="silkArchitecture.png" width="900px" height="630px" />
-
-#Hardware Vsync
-Hardware vsync events from (1), occur on a specific **Display** Object.
-The **Display** object is responsible for enabling / disabling vsync on a per connected display basis.
-For example, if two monitors are connected, two **Display** objects will be created, each listening to vsync events for their respective displays.
-We require one **Display** object per monitor as each monitor may have different vsync rates.
-As a fallback solution, we have one global **Display** object that can synchronize across all connected displays.
-The global **Display** is useful if a window is positioned halfway between the two monitors.
-Each platform will have to implement a specific **Display** object to hook and listen to vsync events.
-As of this writing, both Firefox OS and OS X create their own hardware specific *Hardware Vsync Thread* that executes after a vsync has occured.
-OS X creates one *Hardware Vsync Thread* per **CVDisplayLinkRef**.
-We do not currently support multiple displays, so we use one global **CVDisplayLinkRef** that works across all active displays.
-On Windows, we have to create a new platform *thread* that waits for DwmFlush(), which works across all active displays.
-Once the thread wakes up from DwmFlush(), the actual vsync timestamp is retrieved from DwmGetCompositionTimingInfo(), which is the timestamp that is actually passed into the compositor and refresh driver.
-
-When a vsync occurs on a **Display**, the *Hardware Vsync Thread* callback fetches all **CompositorVsyncDispatchers** associated with the **Display**.
-Each **CompositorVsyncDispatcher** is notified that a vsync has occured with the vsync's timestamp.
-It is the responsibility of the **CompositorVsyncDispatcher** to notify the **Compositor** that is awaiting vsync notifications.
-The **Display** will then notify the associated **RefreshTimerVsyncDispatcher**, which should notify all active **RefreshDrivers** to tick.
-
-All **Display** objects are encapsulated in a **VsyncSource** object.
-The **VsyncSource** object lives in **gfxPlatform** and is instantiated only on the parent process when **gfxPlatform** is created.
-The **VsyncSource** is destroyed when **gfxPlatform** is destroyed.
-There is only one **VsyncSource** object throughout the entire lifetime of Firefox.
-Each platform is expected to implement their own **VsyncSource** to manage vsync events.
-On Firefox OS, this is through the **HwcComposer2D**.
-On OS X, this is through **CVDisplayLinkRef**.
-On Windows, it should be through **DwmGetCompositionTimingInfo**.
-
-#Compositor
-When the **CompositorVsyncDispatcher** is notified of the vsync event, the **CompositorVsyncScheduler::Observer** associated with the **CompositorVsyncDispatcher** begins execution.
-Since the **CompositorVsyncDispatcher** executes on the *Hardware Vsync Thread* and the **Compositor** composites on the *CompositorThread*, the **CompositorVsyncScheduler::Observer** posts a task to the *CompositorThread*.
-The **CompositorBridgeParent** then composites.
-The model where the **CompositorVsyncDispatcher** notifies components on the *Hardware Vsync Thread*, and the component schedules the task on the appropriate thread is used everywhere.
-
-The **CompositorVsyncScheduler::Observer** listens to vsync events as needed and stops listening to vsync when composites are no longer scheduled or required.
-Every **CompositorBridgeParent** is associated and tied to one **CompositorVsyncScheduler::Observer**, which is associated with the **CompositorVsyncDispatcher**.
-Each **CompositorBridgeParent** is associated with one widget and is created when a new platform window or **nsBaseWidget** is created.
-The **CompositorBridgeParent**, **CompositorVsyncDispatcher**, **CompositorVsyncScheduler::Observer**, and **nsBaseWidget** all have the same lifetimes, which are created and destroyed together.
-
-##Out-of-process Compositors
-When compositing out-of-process, this model changes slightly.
-In this case there are effectively two observers: a UI process observer (**CompositorWidgetVsyncObserver**), and the **CompositorVsyncScheduler::Observer** in the GPU process.
-There are also two dispatchers: the widget dispatcher in the UI process (**CompositorVsyncDispatcher**), and the IPDL-based dispatcher in the GPU process (**CompositorBridgeParent::NotifyVsync**).
-The UI process observer and the GPU process dispatcher are linked via an IPDL protocol called PVsyncBridge.
-**PVsyncBridge** is a top-level protocol for sending vsync notifications to the compositor thread in the GPU process.
-The compositor controls vsync observation through a separate actor, **PCompositorWidget**, which (as a subactor for **CompositorBridgeChild**) links the compositor thread in the GPU process to the main thread in the UI process.
-
-Out-of-process compositors do not go through **CompositorVsyncDispatcher** directly.
-Instead, the **CompositorWidgetDelegate** in the UI process creates one, and gives it a **CompositorWidgetVsyncObserver**.
-This observer forwards notifications to a Vsync I/O thread, where **VsyncBridgeChild** then forwards the notification again to the compositor thread in the GPU process.
-The notification is received by a **VsyncBridgeParent**.
-The GPU process uses the layers ID in the notification to find the correct compositor to dispatch the notification to.
-
-###CompositorVsyncDispatcher
-The **CompositorVsyncDispatcher** executes on the *Hardware Vsync Thread*.
-It contains references to the **nsBaseWidget** it is associated with and has a lifetime equal to the **nsBaseWidget**.
-The **CompositorVsyncDispatcher** is responsible for notifying the **CompositorBridgeParent** that a vsync event has occured.
-There can be multiple **CompositorVsyncDispatchers** per **Display**, one **CompositorVsyncDispatcher** per window.
-The only responsibility of the **CompositorVsyncDispatcher** is to notify components when a vsync event has occured, and to stop listening to vsync when no components require vsync events.
-We require one **CompositorVsyncDispatcher** per window so that we can handle multiple **Displays**.
-When compositing in-process, the **CompositorVsyncDispatcher** is attached to the CompositorWidget for the
-window. When out-of-process, it is attached to the CompositorWidgetDelegate, which forwards
-observer notifications over IPDL. In the latter case, its lifetime is tied to a CompositorSession
-rather than the nsIWidget.
-
-###Multiple Displays
-The **VsyncSource** has an API to switch a **CompositorVsyncDispatcher** from one **Display** to another **Display**.
-For example, when one window either goes into full screen mode or moves from one connected monitor to another.
-When one window moves to another monitor, we expect a platform specific notification to occur.
-The detection of when a window enters full screen mode or moves is not covered by Silk itself, but the framework is built to support this use case.
-The expected flow is that the OS notification occurs on **nsIWidget**, which retrieves the associated **CompositorVsyncDispatcher**.
-The **CompositorVsyncDispatcher** then notifies the **VsyncSource** to switch to the correct **Display** the **CompositorVsyncDispatcher** is connected to.
-Because the notification works through the **nsIWidget**, the actual switching of the **CompositorVsyncDispatcher** to the correct **Display** should occur on the *Main Thread*.
-The current implementation of Silk does not handle this case and needs to be built out.
-
-###CompositorVsyncScheduler::Observer
-The **CompositorVsyncScheduler::Observer** handles the vsync notifications and interactions with the **CompositorVsyncDispatcher**.
-When the **Compositor** requires a scheduled composite, it notifies the **CompositorVsyncScheduler::Observer** that it needs to listen to vsync.
-The **CompositorVsyncScheduler::Observer** then observes / unobserves vsync as needed from the **CompositorVsyncDispatcher** to enable composites.
-
-###GeckoTouchDispatcher
-The **GeckoTouchDispatcher** is a singleton that resamples touch events to smooth out jank while tracking a user's finger.
-Because input and composite are linked together, the **CompositorVsyncScheduler::Observer** has a reference to the **GeckoTouchDispatcher** and vice versa.
-
-###Input Events
-One large goal of Silk is to align touch events with vsync events.
-On Firefox OS, touchscreens often have different touch scan rates than the display refreshes.
-A Flame device has a touch refresh rate of 75 HZ, while a Nexus 4 has a touch refresh rate of 100 HZ, while the device's display refresh rate is 60HZ.
-When a vsync event occurs, we resample touch events, and then dispatch the resampled touch event to APZ.
-Touch events on Firefox OS occur on a *Touch Input Thread* whereas they are processed by APZ on the *APZ Controller Thread*.
-We use [Google Android's touch resampling](http://www.masonchang.com/blog/2014/8/25/androids-touch-resampling-algorithm) algorithm to resample touch events.
-
-Currently, we have a strict ordering between Composites and touch events.
-When a touch event occurs on the *Touch Input Thread*, we store the touch event in a queue.
-When a vsync event occurs, the **CompositorVsyncDispatcher** notifies the **Compositor** of a vsync event, which notifies the **GeckoTouchDispatcher**.
-The **GeckoTouchDispatcher** processes the touch event first on the *APZ Controller Thread*, which is the same as the *Compositor Thread* on b2g, then the **Compositor** finishes compositing.
-We require this strict ordering because if a vsync notification is dispatched to both the **Compositor** and **GeckoTouchDispatcher** at the same time, a race condition occurs between processing the touch event and therefore position versus compositing.
-In practice, this creates very janky scrolling.
-As of this writing, we have not analyzed input events on desktop platforms.
-
-One slight quirk is that input events can start a composite, for example during a scroll and after the **Compositor** is no longer listening to vsync events.
-In these cases, we notify the **Compositor** to observe vsync so that it dispatches touch events.
-If touch events were not dispatched, and since the **Compositor** is not listening to vsync events, the touch events would never be dispatched.
-The **GeckoTouchDispatcher** handles this case by always forcing the **Compositor** to listen to vsync events while touch events are occurring.
-
-###Widget, Compositor, CompositorVsyncDispatcher, GeckoTouchDispatcher Shutdown Procedure
-When the [nsBaseWidget shuts down](https://hg.mozilla.org/mozilla-central/file/0df249a0e4d3/widget/nsBaseWidget.cpp#l182) - It calls nsBaseWidget::DestroyCompositor on the *Gecko Main Thread*.
-During nsBaseWidget::DestroyCompositor, it first destroys the CompositorBridgeChild.
-CompositorBridgeChild sends a sync IPC call to CompositorBridgeParent::RecvStop, which calls [CompositorBridgeParent::Destroy](https://hg.mozilla.org/mozilla-central/file/ab0490972e1e/gfx/layers/ipc/CompositorBridgeParent.cpp#l509).
-During this time, the *main thread* is blocked on the parent process.
-CompositorBridgeParent::RecvStop runs on the *Compositor thread* and cleans up some resources, including setting the **CompositorVsyncScheduler::Observer** to nullptr.
-CompositorBridgeParent::RecvStop also explicitly keeps the CompositorBridgeParent alive and posts another task to run CompositorBridgeParent::DeferredDestroy on the Compositor loop so that all ipdl code can finish executing.
-The **CompositorVsyncScheduler::Observer** also unobserves from vsync and cancels any pending composite tasks.
-Once CompositorBridgeParent::RecvStop finishes, the *main thread* in the parent process continues shutting down the nsBaseWidget.
-
-At the same time, the *Compositor thread* is executing tasks until CompositorBridgeParent::DeferredDestroy runs, which flushes the compositor message loop.
-Now we have two tasks as both the nsBaseWidget releases a reference to the Compositor on the *main thread* during destruction and the CompositorBridgeParent::DeferredDestroy releases a reference to the CompositorBridgeParent on the *Compositor Thread*.
-Finally, the CompositorBridgeParent itself is destroyed on the *main thread* once both references are gone due to explicit [main thread destruction](https://hg.mozilla.org/mozilla-central/file/50b95032152c/gfx/layers/ipc/CompositorBridgeParent.h#l148).
-
-With the **CompositorVsyncScheduler::Observer**, any accesses to the widget after nsBaseWidget::DestroyCompositor executes are invalid.
-Any accesses to the compositor between the time the nsBaseWidget::DestroyCompositor runs and the CompositorVsyncScheduler::Observer's destructor runs aren't safe yet a hardware vsync event could occur between these times.
-Since any tasks posted on the Compositor loop after CompositorBridgeParent::DeferredDestroy is posted are invalid, we make sure that no vsync tasks can be posted once CompositorBridgeParent::RecvStop executes and DeferredDestroy is posted on the Compositor thread.
-When the sync call to CompositorBridgeParent::RecvStop executes, we explicitly set the CompositorVsyncScheduler::Observer to null to prevent vsync notifications from occurring.
-If vsync notifications were allowed to occur, since the **CompositorVsyncScheduler::Observer**'s vsync notification executes on the *hardware vsync thread*, it would post a task to the Compositor loop and may execute after CompositorBridgeParent::DeferredDestroy.
-Thus, we explicitly shut down vsync events in the **CompositorVsyncDispatcher** and **CompositorVsyncScheduler::Observer** during nsBaseWidget::Shutdown to prevent any vsync tasks from executing after CompositorBridgeParent::DeferredDestroy.
-
-The **CompositorVsyncDispatcher** may be destroyed on either the *main thread* or *Compositor Thread*, since both the nsBaseWidget and **CompositorVsyncScheduler::Observer** race to destroy on different threads.
-nsBaseWidget is destroyed on the *main thread* and releases a reference to the **CompositorVsyncDispatcher** during destruction.
-The **CompositorVsyncScheduler::Observer** has a race to be destroyed either during CompositorBridgeParent shutdown or from the **GeckoTouchDispatcher** which is destroyed on the main thread with [ClearOnShutdown](https://hg.mozilla.org/mozilla-central/file/21567e9a6e40/xpcom/base/ClearOnShutdown.h#l15).
-Whichever object, the CompositorBridgeParent or the **GeckoTouchDispatcher** is destroyed last will hold the last reference to the **CompositorVsyncDispatcher**, which destroys the object.
-
-#Refresh Driver
-The Refresh Driver is ticked from a [single active timer](https://hg.mozilla.org/mozilla-central/file/ab0490972e1e/layout/base/nsRefreshDriver.cpp#l11).
-The assumption is that there are multiple **RefreshDrivers** connected to a single **RefreshTimer**.
-There are two **RefreshTimers**: an active and an inactive **RefreshTimer**.
-Each Tab has its own **RefreshDriver**, which connects to one of the global **RefreshTimers**.
-The **RefreshTimers** execute on the *Main Thread* and tick their connected **RefreshDrivers**.
-We do not want to break this model of multiple **RefreshDrivers** per a set of two global **RefreshTimers**.
-Each **RefreshDriver** switches between the active and inactive **RefreshTimer**.
-
-Instead, we create a new **RefreshTimer**, the **VsyncRefreshTimer** which ticks based on vsync messages.
-We replace the current active timer with a **VsyncRefreshTimer**.
-All tabs will then tick based on this new active timer.
-Since the **RefreshTimer** has a lifetime of the process, we only need to create a single **RefreshTimerVsyncDispatcher** per **Display** when Firefox starts.
-Even if we do not have any content processes, the Chrome process will still need a **VsyncRefreshTimer**, thus we can associate the **RefreshTimerVsyncDispatcher** with each **Display**.
-
-When Firefox starts, we initially create a new **VsyncRefreshTimer** in the Chrome process.
-The **VsyncRefreshTimer** will listen to vsync notifications from **RefreshTimerVsyncDispatcher** on the global **Display**.
-When nsRefreshDriver::Shutdown executes, it will delete the **VsyncRefreshTimer**.
-This creates a problem as all the **RefreshTimers** are currently manually memory managed whereas **VsyncObservers** are ref counted.
-To work around this problem, we create a new **RefreshDriverVsyncObserver** as an inner class to **VsyncRefreshTimer**, which actually receives vsync notifications. It then ticks the **RefreshDrivers** inside **VsyncRefreshTimer**.
-
-With Content processes, the start up process is more complicated.
-We send vsync IPC messages via the use of the PBackground thread on the parent process, which allows us to send messages from the Parent process' without waiting on the *main thread*.
-This sends messages from the Parent::*PBackground Thread* to the Child::*Main Thread*.
-The *main thread* receiving IPC messages on the content process is acceptable because **RefreshDrivers** must execute on the *main thread*.
-However, there is some amount of time required to setup the IPC connection upon process creation and during this time, the **RefreshDrivers** must tick to set up the process.
-To get around this, we initially use software **RefreshTimers** that already exist during content process startup and swap in the **VsyncRefreshTimer** once the IPC connection is created.
-
-During nsRefreshDriver::ChooseTimer, we create an async PBackground IPC open request to create a **VsyncParent** and **VsyncChild**.
-At the same time, we create a software **RefreshTimer** and tick the **RefreshDrivers** as normal.
-Once the PBackground callback is executed and an IPC connection exists, we swap all **RefreshDrivers** currently associated with the active **RefreshTimer** and swap the **RefreshDrivers** to use the **VsyncRefreshTimer**.
-Since all interactions on the content process occur on the main thread, there are no need for locks.
-The **VsyncParent** listens to vsync events through the **VsyncRefreshTimerDispatcher** on the parent side and sends vsync IPC messages to the **VsyncChild**.
-The **VsyncChild** notifies the **VsyncRefreshTimer** on the content process.
-
-During the shutdown process of the content process, ActorDestroy is called on the **VsyncChild** and **VsyncParent** due to the normal PBackground shutdown process.
-Once ActorDestroy is called, no IPC messages should be sent across the channel.
-After ActorDestroy is called, the IPDL machinery will delete the **VsyncParent/Child** pair.
-The **VsyncParent**, due to being a **VsyncObserver**, is ref counted.
-After **VsyncParent::ActorDestroy** is called, it unregisters itself from the **RefreshTimerVsyncDispatcher**, which holds the last reference to the **VsyncParent**, and the object will be deleted.
-
-Thus the overall flow during normal execution is:
-
-1. VsyncSource::Display::RefreshTimerVsyncDispatcher receives a Vsync notification from the OS in the parent process.
-2. RefreshTimerVsyncDispatcher notifies VsyncRefreshTimer::RefreshDriverVsyncObserver that a vsync occured on the parent process on the hardware vsync thread.
-3. RefreshTimerVsyncDispatcher notifies the VsyncParent on the hardware vsync thread that a vsync occured.
-4. The VsyncRefreshTimer::RefreshDriverVsyncObserver in the parent process posts a task to the main thread that ticks the refresh drivers.
-5. VsyncParent posts a task to the PBackground thread to send a vsync IPC message to VsyncChild.
-6. VsyncChild receive a vsync notification on the content process on the main thread and ticks their respective RefreshDrivers.
-
-###Compressing Vsync Messages
-Vsync messages occur quite often and the *main thread* can be busy for long periods of time due to JavaScript.
-Consistently sending vsync messages to the refresh driver timer can flood the *main thread* with refresh driver ticks, causing even more delays.
-To avoid this problem, we compress vsync messages on both the parent and child processes.
-
-On the parent process, newer vsync messages update a vsync timestamp but do not actually queue any tasks on the *main thread*.
-Once the parent process' *main thread* executes the refresh driver tick, it uses the most updated vsync timestamp to tick the refresh driver.
-After the refresh driver has ticked, one single vsync message is queued for another refresh driver tick task.
-On the content process, the IPDL **compress** keyword automatically compresses IPC messages.
-
-### Multiple Monitors
-In order to have multiple monitor support for the **RefreshDrivers**, we have multiple active **RefreshTimers**.
-Each **RefreshTimer** is associated with a specific **Display** via an id and tick when it's respective **Display** vsync occurs.
-We have **N RefreshTimers**, where N is the number of connected displays.
-Each **RefreshTimer** still has multiple **RefreshDrivers**.
-
-When a tab or window changes monitors, the **nsIWidget** receives a display changed notification.
-Based on which display the window is on, the window switches to the correct **RefreshTimerVsyncDispatcher** and **CompositorVsyncDispatcher** on the parent process based on the display id.
-Each **TabParent** should also send a notification to their child.
-Each **TabChild**, given the display ID, switches to the correct **RefreshTimer** associated with the display ID.
-When each display vsync occurs, it sends one IPC message to notify vsync.
-The vsync message contains a display ID, to tick the appropriate **RefreshTimer** on the content process.
-There is still only one **VsyncParent/VsyncChild** pair, just each vsync notification will include a display ID, which maps to the correct **RefreshTimer**.
-
-#Object Lifetime
-1. CompositorVsyncDispatcher - Lives as long as the nsBaseWidget associated with the VsyncDispatcher
-2. CompositorVsyncScheduler::Observer - Lives and dies the same time as the CompositorBridgeParent.
-3. RefreshTimerVsyncDispatcher - As long as the associated display object, which is the lifetime of Firefox.
-4. VsyncSource - Lives as long as the gfxPlatform on the chrome process, which is the lifetime of Firefox.
-5. VsyncParent/VsyncChild - Lives as long as the content process
-6. RefreshTimer - Lives as long as the process
-
-#Threads
-All **VsyncObservers** are notified on the *Hardware Vsync Thread*. It is the responsibility of the **VsyncObservers** to post tasks to their respective correct thread. For example, the **CompositorVsyncScheduler::Observer** will be notified on the *Hardware Vsync Thread*, and post a task to the *Compositor Thread* to do the actual composition.
-
-1. Compositor Thread - Nothing changes
-2. Main Thread - PVsyncChild receives IPC messages on the main thread. We also enable/disable vsync on the main thread.
-3. PBackground Thread - Creates a connection from the PBackground thread on the parent process to the main thread in the content process.
-4. Hardware Vsync Thread - Every platform is different, but we always have the concept of a hardware vsync thread. Sometimes this is actually created by the host OS. On Windows, we have to create a separate platform thread that blocks on DwmFlush().
new file mode 100644
--- /dev/null
+++ b/gfx/docs/AdvancedLayers.rst
@@ -0,0 +1,370 @@
+Advanced Layers
+===============
+
+Advanced Layers is a new method of compositing layers in Gecko. This
+document serves as a technical overview and provides a short
+walk-through of its source code.
+
+Overview
+--------
+
+Advanced Layers attempts to group as many GPU operations as it can into
+a single draw call. This is a common technique in GPU-based rendering
+called “batching”. It is not always trivial, as a batching algorithm can
+easily waste precious CPU resources trying to build optimal draw calls.
+
+Advanced Layers reuses the existing Gecko layers system as much as
+possible. Huge layer trees do not currently scale well (see the future
+work section), so opportunities for batching are currently limited
+without expending unnecessary resources elsewhere. However, Advanced
+Layers has a few benefits:
+
+-  It submits smaller GPU workloads and buffer uploads than the existing
+   compositor.
+-  It needs only a single pass over the layer tree.
+-  It uses occlusion information more intelligently.
+-  It is easier to add new specialized rendering paths and new layer
+   types.
+-  It separates compositing logic from device logic, unlike the existing
+   compositor.
+-  It is much faster at rendering 3d scenes or complex layer trees.
+-  It has experimental code to use the z-buffer for occlusion culling.
+
+Because of these benefits we hope that it provides a significant
+improvement over the existing compositor.
+
+Advanced Layers uses the acronym “MLG” and “MLGPU” in many places. This
+stands for “Mid-Level Graphics”, the idea being that it is optimized for
+Direct3D 11-style rendering systems as opposed to Direct3D 12 or Vulkan.
+
+LayerManagerMLGPU
+-----------------
+
+Advanced layers does not change client-side rendering at all. Content
+still uses Direct2D (when possible), and creates identical layer trees
+as it would with a normal Direct3D 11 compositor. In fact, Advanced
+Layers re-uses all of the existing texture handling and video
+infrastructure as well, replacing only the composite-side layer types.
+
+Advanced Layers does not create a ``LayerManagerComposite`` - instead,
+it creates a ``LayerManagerMLGPU``. This layer manager does not have a
+``Compositor`` - instead, it has an ``MLGDevice``, which roughly
+abstracts the Direct3D 11 API. (The hope is that this API is easily
+interchangeable for something else when cross-platform or software
+support is needed.)
+
+``LayerManagerMLGPU`` also dispenses with the old “composite” layers for
+new layer types. For example, ``ColorLayerComposite`` becomes
+``ColorLayerMLGPU``. Since these layer types implement ``HostLayer``,
+they integrate with ``LayerTransactionParent`` as normal composite
+layers would.
+
+Rendering Overview
+------------------
+
+The steps for rendering are described in more detail below, but roughly
+the process is:
+
+1. Sort layers front-to-back.
+2. Create a dependency tree of render targets (called “views”).
+3. Accumulate draw calls for all layers in each view.
+4. Upload draw call buffers to the GPU.
+5. Execute draw commands for each view.
+
+Advanced Layers divides the layer tree into “views”
+(``RenderViewMLGPU``), which correspond to a render target. The root
+layer is represented by a view corresponding to the screen. Layers that
+require intermediate surfaces have temporary views. Layers are analyzed
+front-to-back, and rendered back-to-front within a view. Views
+themselves are rendered front-to-back, to minimize render target
+switching.
+
+Each view contains one or more rendering passes (``RenderPassMLGPU``). A
+pass represents a single draw command with one or more rendering items
+attached to it. For example, a ``SolidColorPass`` item contains a
+rectangle and an RGBA value, and many of these can be drawn with a
+single GPU call.
+
+When considering a layer, views will first try to find an existing
+rendering batch that can support it. If so, that pass will accumulate
+another draw item for the layer. Otherwise, a new pass will be added.
+
+When trying to find a matching pass for a layer, there is a tradeoff in
+CPU time versus the GPU time saved by not issuing another draw command.
+We generally care more about CPU time, so we do not try too hard in
+matching items to an existing batch.
+
+After all layers have been processed, there is a “prepare” step. This
+copies all accumulated draw data and uploads it into vertex and constant
+buffers in the GPU.
+
+Finally, we execute rendering commands. At the end of the frame, all
+batches and (most) constant buffers are thrown away.
+
+Shaders Overview
+----------------
+
+Advanced Layers currently has five layer-related shader pipelines:
+
+-  Textured (PaintedLayer, ImageLayer, CanvasLayer)
+-  ComponentAlpha (PaintedLayer with component-alpha)
+-  YCbCr (ImageLayer with YCbCr video)
+-  Color (ColorLayers)
+-  Blend (ContainerLayers with mix-blend modes)
+
+There are also three special shader pipelines:
+
+-  MaskCombiner, which is used to combine mask layers into a single
+   texture.
+-  Clear, which is used for fast region-based clears when not directly
+   supported by the GPU.
+-  Diagnostic, which is used to display the diagnostic overlay texture.
+
+The layer shaders follow a unified structure. Each pipeline has a vertex
+and pixel shader. The vertex shader takes a layers ID, a z-buffer depth,
+a unit position in either a unit square or unit triangle, and either
+rectangular or triangular geometry. Shaders can also have ancillary data
+needed like texture coordinates or colors.
+
+Most of the time, layers have simple rectangular clips with simple
+rectilinear transforms, and pixel shaders do not need to perform masking
+or clipping. For these layers we use a fast-path pipeline, using
+unit-quad shaders that are able to clip geometry so the pixel shader
+does not have to. This type of pipeline does not support complex masks.
+
+If a layer has a complex mask, a rotation or 3d transform, or a complex
+operation like blending, then we use shaders capable of handling
+arbitrary geometry. Their input is a unit triangle, and these shaders
+are generally more expensive.
+
+All of the shader-specific data is modelled in ShaderDefinitionsMLGPU.h.
+
+CPU Occlusion Culling
+---------------------
+
+By default, Advanced Layers performs occlusion culling on the CPU. Since
+layers are visited front-to-back, this is simply a matter of
+accumulating the visible region of opaque layers, and subtracting it
+from the visible region of subsequent layers. There is a major
+difference between this occlusion culling and PostProcessLayers of the
+old compositor: AL performs culling after invalidation, not before.
+Completely valid layers will have an empty visible region.
+
+Most layer types (with the exception of images) will intelligently split
+their draw calls into a batch of individual rectangles, based on their
+visible region.
+
+Z-Buffering and Occlusion
+-------------------------
+
+Advanced Layers also supports occlusion culling on the GPU, using a
+z-buffer. This is disabled by default currently since it is
+significantly costly on integrated GPUs. When using the z-buffer, we
+separate opaque layers into a separate list of passes. The render
+process then uses the following steps:
+
+1. The depth buffer is set to read-write.
+2. Opaque batches are executed.
+3. The depth buffer is set to read-only.
+4. Transparent batches are executed.
+
+The problem we have observed is that the depth buffer increases writes
+to the GPU, and on integrated GPUs this is expensive - we have seen draw
+call times increase by 20-30%, which is the wrong direction we want to
+take on battery life. In particular on a full screen video, the call to
+ClearDepthStencilView plus the actual depth buffer write of the video
+can double GPU time.
+
+For now the depth-buffer is disabled until we can find a compelling case
+for it on non-integrated hardware.
+
+Clipping
+--------
+
+Clipping is a bit tricky in Advanced Layers. We cannot use the hardware
+“scissor” feature, since the clip can change from instance to instance
+within a batch. And if using the depth buffer, we cannot write
+transparent pixels for the clipped area. As a result we always clip
+opaque draw rects in the vertex shader (and sometimes even on the CPU,
+as is needed for sane texture coordinates). Only transparent items are
+clipped in the pixel shader. As a result, masked layers and layers with
+non-rectangular transforms are always considered transparent, and use a
+more flexible clipping pipeline.
+
+Plane Splitting
+---------------
+
+Plane splitting is when a 3D transform causes a layer to be split - for
+example, one transparent layer may intersect another on a separate
+plane. When this happens, Gecko sorts layers using a BSP tree and
+produces a list of triangles instead of draw rects.
+
+These layers cannot use the “unit quad” shaders that support the fast
+clipping pipeline. Instead they always use the full triangle-list
+shaders that support extended vertices and clipping.
+
+This is the slowest path we can take when building a draw call, since we
+must interact with the polygon clipping and texturing code.
+
+Masks
+-----
+
+For each layer with a mask attached, Advanced Layers builds a
+``MaskOperation``. These operations must resolve to a single mask
+texture, as well as a rectangular area to which the mask applies. All
+batched pixel shaders will automatically clip pixels to the mask if a
+mask texture is bound. (Note that we must use separate batches if the
+mask texture changes.)
+
+Some layers have multiple mask textures. In this case, the MaskOperation
+will store the list of masks, and right before rendering, it will invoke
+a shader to combine these masks into a single texture.
+
+MaskOperations are shared across layers when possible, but are not
+cached across frames.
+
+BigImage Support
+----------------
+
+ImageLayers and CanvasLayers can be tiled with many individual textures.
+This happens in rare cases where the underlying buffer is too big for
+the GPU. Early on this caused problems for Advanced Layers, since AL
+required one texture per layer. We implemented BigImage support by
+creating temporary ImageLayers for each visible tile, and throwing those
+layers away at the end of the frame.
+
+Advanced Layers no longer has a 1:1 layer:texture restriction, but we
+retain the temporary layer solution anyway. It is not much code and it
+means we do not have to split ``TexturedLayerMLGPU`` methods into
+iterated and non-iterated versions.
+
+Texture Locking
+---------------
+
+Advanced Layers has a different texture locking scheme than the existing
+compositor. If a texture needs to be locked, then it is locked by the
+MLGDevice automatically when bound to the current pipeline. The
+MLGDevice keeps a set of the locked textures to avoid double-locking. At
+the end of the frame, any textures in the locked set are unlocked.
+
+We cannot easily replicate the locking scheme in the old compositor,
+since the duration of using the texture is not scoped to when we visit
+the layer.
+
+Buffer Measurements
+-------------------
+
+Advanced Layers uses constant buffers to send layer information and
+extended instance data to the GPU. We do this by pre-allocating large
+constant buffers and mapping them with ``MAP_DISCARD`` at the beginning
+of the frame. Batches may allocate into this up to the maximum bindable
+constant buffer size of the device (currently, 64KB).
+
+There are some downsides to this approach. Constant buffers are
+difficult to work with - they have specific alignment requirements, and
+care must be taken not to run over the maximum number of constants in a
+buffer. Another approach would be to store constants in a 2D texture and
+use vertex shader texture fetches. Advanced Layers implemented this and
+benchmarked it to decide which approach to use. Textures seemed to skew
+better on GPU performance, but worse on CPU, but this varied depending
+on the GPU. Overall constant buffers performed best and most
+consistently, so we have kept them.
+
+Additionally, we tested different ways of performing buffer uploads.
+Buffer creation itself is costly, especially on integrated GPUs, and
+especially so for immutable, immediate-upload buffers. As a result we
+aggressively cache buffer objects and always allocate them as
+MAP_DISCARD unless they are write-once and long-lived.
+
+Buffer Types
+------------
+
+Advanced Layers has a few different classes to help build and upload
+buffers to the GPU. They are:
+
+-  ``MLGBuffer``. This is the low-level shader resource that
+   ``MLGDevice`` exposes. It is the building block for buffer helper
+   classes, but it can also be used to make one-off, immutable,
+   immediate-upload buffers. MLGBuffers, being a GPU resource, are
+   reference counted.
+-  ``SharedBufferMLGPU``. These are large, pre-allocated buffers that
+   are read-only on the GPU and write-only on the CPU. They usually
+   exceed the maximum bindable buffer size. There are three shared
+   buffers created by default and they are automatically unmapped as
+   needed: one for vertices, one for vertex shader constants, and one
+   for pixel shader constants. When callers allocate into a shared
+   buffer they get back a mapped pointer, a GPU resource, and an offset.
+   When the underlying device supports offsetable buffers (like
+   ``ID3D11DeviceContext1`` does), this results in better GPU
+   utilization, as there are fewer resources and fewer upload commands.
+-  ``ConstantBufferSection`` and ``VertexBufferSection``. These are
+   “views” into a ``SharedBufferMLGPU``. They contain the underlying
+   ``MLGBuffer``, and when offsetting is supported, the offset
+   information necessary for resource binding. Sections are not
+   reference counted.
+-  ``StagingBuffer``. A dynamically sized CPU buffer where items can be
+   appended in a free-form manner. The stride of a single “item” is
+   computed by the first item written, and successive items must have
+   the same stride. The buffer must be uploaded to the GPU manually.
+   Staging buffers are appropriate for creating general constant or
+   vertex buffer data. They can also write items in reverse, which is
+   how we render back-to-front when layers are visited front-to-back.
+   They can be uploaded to a ``SharedBufferMLGPU`` or an immutable
+   ``MLGBuffer`` very easily. Staging buffers are not reference counted.
+
+Unsupported Features
+--------------------
+
+Currently, these features of the old compositor are not yet implemented.
+
+-  OpenGL and software support (currently AL only works on D3D11).
+-  APZ displayport overlay.
+-  Diagnostic/developer overlays other than the FPS/timing overlay.
+-  DEAA. It was never ported to the D3D11 compositor, but we would like
+   it.
+-  Component alpha when used inside an opaque intermediate surface.
+-  Effects prefs. Possibly not needed post-B2G removal.
+-  Widget overlays and underlays used by macOS and Android.
+-  DefaultClearColor. This is Android specific, but is easy to add
+   when needed.
+-  Frame uniformity info in the profiler. Possibly not needed post-B2G
+   removal.
+-  LayerScope. There are no plans to make this work.
+
+Future Work
+-----------
+
+-  Refactor for D3D12/Vulkan support (namely, split MLGDevice into
+   something less stateful and something else more low-level).
+-  Remove “MLG” moniker and namespace everything.
+-  Other backends (D3D12/Vulkan, OpenGL, Software)
+-  Delete CompositorD3D11
+-  Add DEAA support
+-  Re-enable the depth buffer by default for fast GPUs
+-  Re-enable right-sizing of inaccurately sized containers
+-  Drop constant buffers for ancillary vertex data
+-  Fast shader paths for simple video/painted layer cases
+
+History
+-------
+
+Advanced Layers has gone through four major design iterations. The
+initial version used tiling - each render view divided the screen into
+128x128 tiles, and layers were assigned to tiles based on their
+screen-space draw area. This approach proved not to scale well to 3d
+transforms, and so tiling was eliminated.
+
+We replaced it with a simple system of accumulating draw regions to each
+batch, thus ensuring that items could be assigned to batches while
+maintaining correct z-ordering. This second iteration also coincided
+with plane-splitting support.
+
+On large layer trees, accumulating the affected regions of batches
+proved to be quite expensive. This led to a third iteration, using depth
+buffers and separate opaque and transparent batch lists to achieve
+z-ordering and occlusion culling.
+
+Finally, depth buffers proved to be too expensive, and we introduced a
+simple CPU-based occlusion culling pass. This iteration coincided with
+using more precise draw rects and splitting pipelines into unit-quad,
+cpu-clipped and triangle-list, gpu-clipped variants.
new file mode 100644
--- /dev/null
+++ b/gfx/docs/AsyncPanZoom.rst
@@ -0,0 +1,452 @@
+.. _apz:
+
+Asynchronous Panning and Zooming
+================================
+
+**This document is a work in progress. Some information may be missing
+or incomplete.**
+
+.. image:: AsyncPanZoomArchitecture.png
+
+Goals
+-----
+
+We need to be able to provide a visual response to user input with
+minimal latency. In particular, on devices with touch input, content
+must track the finger exactly while panning, or the user experience is
+very poor. According to the UX team, 120ms is an acceptable latency
+between user input and response.
+
+Context and surrounding architecture
+------------------------------------
+
+The fundamental problem we are trying to solve with the Asynchronous
+Panning and Zooming (APZ) code is that of responsiveness. By default,
+web browsers operate in a “game loop” that looks like this:
+
+::
+
+       while true:
+           process input
+           do computations
+           repaint content
+           display repainted content
+
+In browsers the “do computation” step can be arbitrarily expensive
+because it can involve running event handlers in web content. Therefore,
+there can be an arbitrary delay between the input being received and the
+on-screen display getting updated.
+
+Responsiveness is always good, and with touch-based interaction it is
+even more important than with mouse or keyboard input. In order to
+ensure responsiveness, we split the “game loop” model of the browser
+into a multithreaded variant which looks something like this:
+
+::
+
+       Thread 1 (compositor thread)
+       while true:
+           receive input
+           send a copy of input to thread 2
+           adjust painted content based on input
+           display adjusted painted content
+       
+       Thread 2 (main thread)
+       while true:
+           receive input from thread 1
+           do computations
+           repaint content
+           update the copy of painted content in thread 1
+
+This multithreaded model is called off-main-thread compositing (OMTC),
+because the compositing (where the content is displayed on-screen)
+happens on a separate thread from the main thread. Note that this is a
+very very simplified model, but in this model the “adjust painted
+content based on input” is the primary function of the APZ code.
+
+The “painted content” is stored on a set of “layers”, that are
+conceptually double-buffered. That is, when the main thread does its
+repaint, it paints into one set of layers (the “client” layers). The
+update that is sent to the compositor thread copies all the changes from
+the client layers into another set of layers that the compositor holds.
+These layers are called the “shadow” layers or the “compositor” layers.
+The compositor in theory can continuously composite these shadow layers
+to the screen while the main thread is busy doing other things and
+painting a new set of client layers.
+
+The APZ code takes the input events that are coming in from the hardware
+and uses them to figure out what the user is trying to do (e.g. pan the
+page, zoom in). It then expresses this user intention in the form of
+translation and/or scale transformation matrices. These transformation
+matrices are applied to the shadow layers at composite time, so that
+what the user sees on-screen reflects what they are trying to do as
+closely as possible.
+
+Technical overview
+------------------
+
+As per the heavily simplified model described above, the fundamental
+purpose of the APZ code is to take input events and produce
+transformation matrices. This section attempts to break that down and
+identify the different problems that make this task non-trivial.
+
+Checkerboarding
+~~~~~~~~~~~~~~~
+
+The content area that is painted and stored in a shadow layer is called
+the “displayport”. The APZ code is responsible for determining how large
+the displayport should be. On the one hand, we want the displayport to
+be as large as possible. At the very least it needs to be larger than
+what is visible on-screen, because otherwise, as soon as the user pans,
+there will be some unpainted area of the page exposed. However, we
+cannot always set the displayport to be the entire page, because the
+page can be arbitrarily long and this would require an unbounded amount
+of memory to store. Therefore, a good displayport size is one that is
+larger than the visible area but not so large that it is a huge drain on
+memory. Because the displayport is usually smaller than the whole page,
+it is always possible for the user to scroll so fast that they end up in
+an area of the page outside the displayport. When this happens, they see
+unpainted content; this is referred to as “checkerboarding”, and we try
+to avoid it where possible.
+
+There are many possible ways to determine what the displayport should be
+in order to balance the tradeoffs involved (i.e. having one that is too
+big is bad for memory usage, and having one that is too small results in
+excessive checkerboarding). Ideally, the displayport should cover
+exactly the area that we know the user will make visible. Although we
+cannot know this for sure, we can use heuristics based on current
+panning velocity and direction to ensure a reasonably-chosen displayport
+area. This calculation is done in the APZ code, and a new desired
+displayport is frequently sent to the main thread as the user is panning
+around.
+
+Multiple layers
+~~~~~~~~~~~~~~~
+
+Consider, for example, a scrollable page that contains an iframe which
+itself is scrollable. The iframe can be scrolled independently of the
+top-level page, and we would like both the page and the iframe to scroll
+responsively. This means that we want independent asynchronous panning
+for both the top-level page and the iframe. In addition to iframes,
+elements that have the overflow:scroll CSS property set are also
+scrollable, and also end up on separate scrollable layers. In the
+general case, the layers are arranged in a tree structure, and so within
+the APZ code we have a matching tree of AsyncPanZoomController (APZC)
+objects, one for each scrollable layer. To manage this tree of APZC
+instances, we have a single APZCTreeManager object. Each APZC is
+relatively independent and handles the scrolling for its associated
+layer, but there are some cases in which they need to interact; these
+cases are described in the sections below.
+
+Hit detection
+~~~~~~~~~~~~~
+
+Consider again the case where we have a scrollable page that contains an
+iframe which itself is scrollable. As described above, we will have two
+APZC instances - one for the page and one for the iframe. When the user
+puts their finger down on the screen and moves it, we need to do some
+sort of hit detection in order to determine whether their finger is on
+the iframe or on the top-level page. Based on where their finger lands,
+the appropriate APZC instance needs to handle the input. This hit
+detection is also done in the APZCTreeManager, as it has the necessary
+information about the sizes and positions of the layers. Currently this
+hit detection is not perfect, as it uses rects and does not account for
+things like rounded corners and opacity.
+
+Also note that for some types of input (e.g. when the user puts two
+fingers down to do a pinch) we do not want the input to be “split”
+across two different APZC instances. In the case of a pinch, for
+example, we find a “common ancestor” APZC instance - one that is
+zoomable and contains all of the touch input points, and direct the
+input to that APZC instance.
+
+Scroll Handoff
+~~~~~~~~~~~~~~
+
+Consider yet again the case where we have a scrollable page that
+contains an iframe which itself is scrollable. Say the user scrolls the
+iframe so that it reaches the bottom. If the user continues panning on
+the iframe, the expectation is that the top-level page will start
+scrolling. However, as discussed in the section on hit detection, the
+APZC instance for the iframe is separate from the APZC instance for the
+top-level page. Thus, we need the two APZC instances to communicate in
+some way such that input events on the iframe result in scrolling on the
+top-level page. This behaviour is referred to as “scroll handoff” (or
+“fling handoff” in the case where analogous behaviour results from the
+scrolling momentum of the page after the user has lifted their finger).
+
+Input event untransformation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The APZC architecture by definition results in two copies of a “scroll
+position” for each scrollable layer. There is the original copy on the
+main thread that is accessible to web content and the layout and
+painting code. And there is a second copy on the compositor side, which
+is updated asynchronously based on user input, and corresponds to what
+the user visually sees on the screen. Although these two copies may
+diverge temporarily, they are reconciled periodically. In particular,
+they diverge while the APZ code is performing an async pan or zoom
+action on behalf of the user, and are reconciled when the APZ code
+requests a repaint from the main thread.
+
+Because of the way input events are stored, this has some unfortunate
+consequences. Input events are stored relative to the device screen - so
+if the user touches at the same physical spot on the device, the same
+input events will be delivered regardless of the content scroll
+position. When the main thread receives a touch event, it combines that
+with the content scroll position in order to figure out what DOM element
+the user touched. However, because we now have two different scroll
+positions, this process may not work perfectly. A concrete example
+follows:
+
+Consider a device with screen size 600 pixels tall. On this device, a
+user is viewing a document that is 1000 pixels tall, and that is
+scrolled down by 200 pixels. That is, the vertical section of the
+document from 200px to 800px is visible. Now, if the user touches a
+point 100px from the top of the physical display, the hardware will
+generate a touch event with y=100. This will get sent to the main
+thread, which will add the scroll position (200) and get a
+document-relative touch event with y=300. This new y-value will be used
+in hit detection to figure out what the user touched. If the document
+had an absolutely-positioned div at y=300, then that would receive the
+touch event.
+
+Now let us add some async scrolling to this example. Say that the user
+additionally scrolls the document by another 10 pixels asynchronously
+(i.e. only on the compositor thread), and then does the same touch
+event. The same input event is generated by the hardware, and as before,
+the document will deliver the touch event to the div at y=300. However,
+visually, the document is scrolled by an additional 10 pixels so this
+outcome is wrong. What needs to happen is that the APZ code needs to
+intercept the touch event and account for the 10 pixels of asynchronous
+scroll. Therefore, the input event with y=100 gets converted to y=110 in
+the APZ code before being passed on to the main thread. The main thread
+then adds the scroll position it knows about and determines that the
+user touched at a document-relative position of y=310.
+
+Analogous input event transformations need to be done for horizontal
+scrolling and zooming.
+
+Content independently adjusting scrolling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As described above, there are two copies of the scroll position in the
+APZ architecture - one on the main thread and one on the compositor
+thread. Usually for architectures like this, there is a single “source
+of truth” value and the other value is simply a copy. However, in this
+case that is not easily possible to do. The reason is that both of these
+values can be legitimately modified. On the compositor side, the input
+events the user is triggering modify the scroll position, which is then
+propagated to the main thread. However, on the main thread, web content
+might be running JavaScript code that programmatically sets the scroll
+position (via window.scrollTo, for example). Scroll changes driven from
+the main thread are just as legitimate and need to be propagated to the
+compositor thread, so that the visual display updates in response.
+
+Because the cross-thread messaging is asynchronous, reconciling the two
+types of scroll changes is a tricky problem. Our design solves this
+using various flags and generation counters. The general heuristic we
+have is that content-driven scroll position changes (e.g. scrollTo from
+JS) are never lost. For instance, if the user is doing an async scroll
+with their finger and content does a scrollTo in the middle, then some
+of the async scroll would occur before the “jump” and the rest after the
+“jump”.
+
+Content preventing default behaviour of input events
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Another problem that we need to deal with is that web content is allowed
+to intercept touch events and prevent the “default behaviour” of
+scrolling. This ability is defined in web standards and is
+non-negotiable. Touch event listeners in web content are allowed to call
+preventDefault() on the touchstart or first touchmove event for a touch
+point; doing this is supposed to “consume” the event and prevent
+touch-based panning. As we saw in a previous section, the input event
+needs to be untransformed by the APZ code before it can be delivered to
+content. But, because of the preventDefault problem, we cannot fully
+process the touch event in the APZ code until content has had a chance
+to handle it. Web browsers in general solve this problem by inserting a
+delay of up to 300ms before processing the input - that is, web content
+is allowed up to 300ms to process the event and call preventDefault on
+it. If web content takes longer than 300ms, or if it completes handling
+of the event without calling preventDefault, then the browser
+immediately starts processing the events.
+
+The way the APZ implementation deals with this is that upon receiving a
+touch event, it immediately returns an untransformed version that can be
+dispatched to content. It also schedules a 400ms timeout (600ms on
+Android) during which content is allowed to prevent scrolling. There is
+an API that allows the main-thread event dispatching code to notify the
+APZ as to whether or not the default action should be prevented. If the
+APZ content response timeout expires, or if the main-thread event
+dispatching code notifies the APZ of the preventDefault status, then the
+APZ continues with the processing of the events (which may involve
+discarding the events).
+
+The touch-action CSS property from the pointer-events spec is intended
+to allow eliminating this 400ms delay in many cases (although for
+backwards compatibility it will still be needed for a while). Note that
+even with touch-action implemented, there may be cases where the APZ
+code does not know the touch-action behaviour of the point the user
+touched. In such cases, the APZ code will still wait up to 400ms for the
+main thread to provide it with the touch-action behaviour information.
+
+Technical details
+-----------------
+
+This section describes various pieces of the APZ code, and goes into
+more specific detail on APIs and code than the previous sections. The
+primary purpose of this section is to help people who plan on making
+changes to the code, while also not going into so much detail that it
+needs to be updated with every patch.
+
+Overall flow of input events
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how input events flow through the APZ code.
+
+1.  Input events arrive from the hardware/widget code into the APZ via
+    APZCTreeManager::ReceiveInputEvent. The thread that invokes this is
+    called the input thread, and may or may not be the same as the Gecko
+    main thread.
+2.  Conceptually the first thing that the APZCTreeManager does is to
+    associate these events with “input blocks”. An input block is a set
+    of events that share certain properties, and generally are intended
+    to represent a single gesture. For example with touch events, all
+    events following a touchstart up to but not including the next
+    touchstart are in the same block. All of the events in a given block
+    will go to the same APZC instance and will either all be processed
+    or all be dropped.
+3.  Using the first event in the input block, the APZCTreeManager does a
+    hit-test to see which APZC it hits. This hit-test uses the event
+    regions populated on the layers, which may be larger than the true
+    hit area of the layer. If no APZC is hit, the events are discarded
+    and we jump to step 6. Otherwise, the input block is tagged with the
+    hit APZC as a tentative target and put into a global APZ input
+    queue.
+4.
+
+    i.  If the input events landed outside the dispatch-to-content event
+        region for the layer, any available events in the input block
+        are processed. These may trigger behaviours like scrolling or
+        tap gestures.
+    ii. If the input events landed inside the dispatch-to-content event
+        region for the layer, the events are left in the queue and a
+        400ms timeout is initiated. If the timeout expires before step 9
+        is completed, the APZ assumes the input block was not cancelled
+        and the tentative target is correct, and processes them as part
+        of step 10.
+
+5.  The call stack unwinds back to APZCTreeManager::ReceiveInputEvent,
+    which does an in-place modification of the input event so that any
+    async transforms are removed.
+6.  The call stack unwinds back to the widget code that called
+    ReceiveInputEvent. This code now has the event in the coordinate
+    space Gecko is expecting, and so can dispatch it to the Gecko main
+    thread.
+7.  Gecko performs its own usual hit-testing and event dispatching for
+    the event. As part of this, it records whether any touch listeners
+    cancelled the input block by calling preventDefault(). It also
+    activates inactive scrollframes that were hit by the input events.
+8.  The call stack unwinds back to the widget code, which sends two
+    notifications to the APZ code on the input thread. The first
+    notification is via APZCTreeManager::ContentReceivedInputBlock, and
+    informs the APZ whether the input block was cancelled. The second
+    notification is via APZCTreeManager::SetTargetAPZC, and informs the
+    APZ of the results of the Gecko hit-test during event dispatch. Note
+    that Gecko may report that the input event did not hit any
+    scrollable frame at all. The SetTargetAPZC notification happens only
+    once per input block, while the ContentReceivedInputBlock
+    notification may happen once per block, or multiple times per block,
+    depending on the input type.
+9.
+
+    i.   If the events were processed as part of step 4(i), the
+         notifications from step 8 are ignored and step 10 is skipped.
+    ii.  If events were queued as part of step 4(ii), and steps 5-8 take
+         less than 400ms, the arrival of both notifications from step 8
+         will mark the input block ready for processing.
+    iii. If events were queued as part of step 4(ii), but steps 5-8 take
+         longer than 400ms, the notifications from step 8 will be
+         ignored and step 10 will already have happened.
+
+10. If events were queued as part of step 4(ii) they are now either
+    processed (if the input block was not cancelled and Gecko detected a
+    scrollframe under the input event, or if the timeout expired) or
+    dropped (all other cases). Note that the APZC that processes the
+    events may be different at this step than the tentative target from
+    step 3, depending on the SetTargetAPZC notification. Processing the
+    events may trigger behaviours like scrolling or tap gestures.
+
+If the CSS touch-action property is enabled, the above steps are
+modified as follows: \* In step 4, the APZC also requires the allowed
+touch-action behaviours for the input event. This might have been
+determined as part of the hit-test in APZCTreeManager; if not, the
+events are queued. \* In step 6, the widget code determines the content
+element at the point under the input element, and notifies the APZ code
+of the allowed touch-action behaviours. This notification is sent via a
+call to APZCTreeManager::SetAllowedTouchBehavior on the input thread. \*
+In step 9(ii), the input block will only be marked ready for processing
+once all three notifications arrive.
+
+Threading considerations
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The bulk of the input processing in the APZ code happens on what we call
+“the input thread”. In practice the input thread could be the Gecko main
+thread, the compositor thread, or some other thread. There are obvious
+downsides to using the Gecko main thread - that is, “asynchronous”
+panning and zooming is not really asynchronous as input events can only
+be processed while Gecko is idle. In an e10s environment, using the
+Gecko main thread of the chrome process is acceptable, because the code
+running in that process is more controllable and well-behaved than
+arbitrary web content. Using the compositor thread as the input thread
+could work on some platforms, but may be inefficient on others. For
+example, on Android (Fennec) we receive input events from the system on
+a dedicated UI thread. We would have to redispatch the input events to
+the compositor thread if we wanted the input thread to be the same as
+the compositor thread. This introduces a potential for higher latency,
+particularly if the compositor does any blocking operations - blocking
+SwapBuffers operations, for example. As a result, the APZ code itself
+does not assume that the input thread will be the same as the Gecko main
+thread or the compositor thread.
+
+Active vs. inactive scrollframes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The number of scrollframes on a page is potentially unbounded. However,
+we do not want to create a separate layer for each scrollframe right
+away, as this would require large amounts of memory. Therefore,
+scrollframes are designated as either “active” or “inactive”. Active
+scrollframes are the ones that do have their contents put on a separate
+layer (or set of layers), and inactive ones do not.
+
+Consider a page with a scrollframe that is initially inactive. When
+layout generates the layers for this page, the content of the
+scrollframe will be flattened into some other PaintedLayer (call it P).
+The layout code also adds the area (or bounding region in case of weird
+shapes) of the scrollframe to the dispatch-to-content region of P.
+
+When the user starts interacting with that content, the hit-test in the
+APZ code finds the dispatch-to-content region of P. The input block
+therefore has a tentative target of P when it goes into step 4(ii) in
+the flow above. When gecko processes the input event, it must detect the
+inactive scrollframe and activate it, as part of step 7. Finally, the
+widget code sends the SetTargetAPZC notification in step 8 to notify the
+APZ that the input block should really apply to this new layer. The
+issue here is that the layer transaction containing the new layer must
+reach the compositor and APZ before the SetTargetAPZC notification. If
+this does not occur within the 400ms timeout, the APZ code will be
+unable to update the tentative target, and will continue to use P for
+that input block. Input blocks that start after the layer transaction
+will get correctly routed to the new layer as there will now be a layer
+and APZC instance for the active scrollframe.
+
+This model implies that when the user initially attempts to scroll an
+inactive scrollframe, it may end up scrolling an ancestor scrollframe.
+(This is because in the absence of the SetTargetAPZC notification, the
+input events will get applied to the closest ancestor scrollframe’s
+APZC.) Only after the round-trip to the gecko thread is complete is
+there a layer for async scrolling to actually occur on the scrollframe
+itself. At that point the scrollframe will start receiving new input
+blocks and will scroll normally.
rename from gfx/doc/AsyncPanZoom-HighLevel.png
rename to gfx/docs/AsyncPanZoomArchitecture.png
new file mode 100644
--- /dev/null
+++ b/gfx/docs/GraphicsOverview.rst
@@ -0,0 +1,159 @@
+Graphics Overview
+=========================
+
+Work in progress. Possibly incorrect or incomplete.
+---------------------------------------------------
+
+Jargon
+------
+
+There's a lot of jargon in the graphics stack. We try to maintain a list
+of common words and acronyms `here <https://wiki.mozilla.org/Platform/GFX/Jargon>`__.
+
+Overview
+--------
+
+The graphics system is responsible for rendering (painting, drawing)
+the frame tree (rendering tree) elements as created by the layout
+system. Each leaf in the tree has content, either bounded by a rectangle
+(or perhaps another shape, in the case of SVG.)
+
+The simple approach for producing the result would thus involve
+traversing the frame tree, in a correct order, drawing each frame into
+the resulting buffer and displaying (printing notwithstanding) that
+buffer when the traversal is done. It is worth spending some time on the
+“correct order” note above. If there are no overlapping frames, this is
+fairly simple - any order will do, as long as there is no background. If
+there is background, we just have to worry about drawing that first.
+Since we do not control the content, chances are the page is more
+complicated. There are overlapping frames, likely with transparency, so
+we need to make sure the elements are drawn “back to front”, in layers,
+so to speak. Layers are an important concept, and we will revisit them
+shortly, as they are central to fixing a major issue with the above
+simple approach.
+
+While the above simple approach will work, the performance will suffer.
+Each time anything changes in any of the frames, the complete process
+needs to be repeated, everything needs to be redrawn. Further, there is
+very little space to take advantage of the modern graphics (GPU)
+hardware, or multi-core computers. If you recall from the previous
+sections, the frame tree is only accessible from the UI thread, so while
+we’re doing all this work, the UI is basically blocked.
+
+(Retained) Layers
+~~~~~~~~~~~~~~~~~
+
+Layers framework was introduced to address the above performance issues,
+by having a part of the design address each item. At the high level:
+
+1. We create a layer tree. The leaf elements of the tree contain all
+   frames (possibly multiple frames per leaf).
+2. We render each layer tree element and cache (retain) the result.
+3. We composite (combine) all the leaf elements into the final result.
+
+Let’s examine each of these steps, in reverse order.
+
+Compositing
+~~~~~~~~~~~
+
+We use the term composite as it implies that the order is important. If
+the elements being composited overlap, whether there is transparency
+involved or not, the order in which they are combined will affect the
+result. Compositing is where we can use some of the power of the modern
+graphics hardware. It is optimal for doing this job. In the scenarios
+where only the position of individual frames changes, without the
+content inside them changing, we see why caching each layer would be
+advantageous - we only need to repeat the final compositing step,
+completely skipping the layer tree creation and the rendering of each
+leaf, thus speeding up the process considerably.
+
+Another benefit is equally apparent in the context of the stated
+deficiencies of the simple approach. We can use the available graphics
+hardware accelerated APIs to do the compositing step. Direct3D, OpenGL
+can be used on different platforms and are well suited to accelerate
+this step.
+
+Finally, we can now envision performing the compositing step on a
+separate thread, unblocking the UI thread for other work, and doing more
+work in parallel. More on this below.
+
+It is important to note that the number of operations in this step is
+proportional to the number of layer tree (leaf) elements, so there is
+additional work and complexity involved, when the layer tree is large.
+
+Render and retain layer elements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As we saw, the compositing step benefits from caching the intermediate
+result. This does result in the extra memory usage, so needs to be
+considered during the layer tree creation. Beyond the caching, we can
+accelerate the rendering of each element by (indirectly) using the
+available platform APIs (e.g., Direct2D, CoreGraphics, even some of the
+3D APIs like OpenGL or Direct3D) as available. This is actually done
+through a platform independent API (see Moz2D) below, but is important
+to realize it does get accelerated appropriately.
+
+Creating the layer tree
+~~~~~~~~~~~~~~~~~~~~~~~
+
+We need to create a layer tree (from the frames tree), which will give
+us the correct result while striking the right balance between a layer
+per frame element and a single layer for the complete frames tree. As
+was mentioned above, there is an overhead in traversing the whole tree
+and caching each of the elements, balanced by the performance
+improvements. Some of the performance improvements are only noticed when
+something changes (e.g., one element is moving, we only need to redo the
+compositing step).
+
+Refresh Driver
+~~~~~~~~~~~~~~
+
+Layers
+~~~~~~
+
+Rendering each layer
+~~~~~~~~~~~~~~~~~~~~
+
+Tiling vs. Buffer Rotation vs. Full paint
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Compositing for the final result
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Graphics API
+~~~~~~~~~~~~
+
+Moz2D
+~~~~~
+
+-  The Moz2D graphics API, part of the Azure project, is a
+   cross-platform interface onto the various graphics backends that
+   Gecko uses for rendering such as Direct2D (1.0 and 1.1), Skia, Cairo,
+   Quartz, and NV Path. Adding a new graphics platform to Gecko is
+   accomplished by adding a backend to Moz2D.
+   See `Moz2D documentation on wiki <https://wiki.mozilla.org/Platform/GFX/Moz2D>`__.
+
+Compositing
+~~~~~~~~~~~
+
+Image Decoding
+~~~~~~~~~~~~~~
+
+Image Animation
+~~~~~~~~~~~~~~~
+
+`Historical Documents <http://www.youtube.com/watch?v=lLZQz26-kms>`__
+---------------------------------------------------------------------
+
+A number of posts and blogs that will give you more details or more
+background, or reasoning that led to different solutions and approaches.
+
+-  2010-01 `Layers: Cross Platform Acceleration <http://www.basschouten.com/blog1.php/layers-cross-platform-acceleration>`__
+-  2010-04 `Layers <http://robert.ocallahan.org/2010/04/layers_01.html>`__
+-  2010-07 `Retained Layers <http://robert.ocallahan.org/2010/07/retained-layers_16.html>`__
+-  2011-04 `Introduction <https://blog.mozilla.org/joe/2011/04/26/introducing-the-azure-project/%20Moz2D>`__
+-  2011-07 `Layers <http://chrislord.net/index.php/2011/07/25/shadow-layers-and-learning-by-failing/%20Shadow>`__
+-  2011-09 `Graphics API Design <http://robert.ocallahan.org/2011/09/graphics-api-design.html>`__
+-  2012-04 `Moz2D Canvas on OSX <http://muizelaar.blogspot.ca/2012/04/azure-canvas-on-os-x.html>`__
+-  2012-05 `Mask Layers <http://featherweightmusings.blogspot.co.uk/2012/05/mask-layers_26.html>`__
+-  2013-07 `Graphics related <http://www.basschouten.com/blog1.php>`__
new file mode 100644
--- /dev/null
+++ b/gfx/docs/LayersHistory.rst
@@ -0,0 +1,63 @@
+Layers History
+==============
+
+This is an overview of the major events in the history of our Layers
+infrastructure.
+
+-  iPhone released in July 2007 (Built on a toolkit called LayerKit)
+
+-  Core Animation (October 2007) LayerKit was publicly renamed to Core
+   Animation in OS X 10.5
+
+-  Webkit CSS 3d transforms (July 2009)
+
+-  Original layers API (March 2010) Introduced the idea of a layer
+   manager that would composite. One of the first use cases for this was
+   hardware accelerated YUV conversion for video.
+
+-  Retained layers (July 7 2010 - Bug 564991) This was an important
+   concept that introduced the idea of persisting the layer content
+   across paints in gecko controlled buffers instead of just by the OS.
+   This introduced the concept of buffer rotation to deal with scrolling
+   instead of using the native scrolling APIs like ScrollWindowEx
+
+-  Layers IPC (July 2010 - Bug 570294) This introduced shadow layers and
+   edit lists and was originally done for e10s v1
+
+-  3D transforms (September 2011 - Bug 505115)
+
+-  OMTC (December 2011 - Bug 711168) This was prototyped on OS X but
+   shipped first for Fennec
+
+-  Tiling v1 (April 2012 - Bug 739679) Originally done for Fennec. This
+   was done to avoid situations where we had to do a bunch of work for
+   scrolling a small amount. i.e. buffer rotation. It allowed us to have
+   a variety of interesting features like progressive painting and lower
+   resolution painting.
+
+-  C++ Async pan zoom controller (July 2012 - Bug 750974) The existing
+   APZ code was in Java for Fennec so this was reimplemented.
+
+-  Streaming WebGL Buffers (February 2013 - Bug 716859) Infrastructure
+   to allow OMTC WebGL and avoid the need to glFinish() every frame.
+
+-  Compositor API (April 2013 - Bug 825928) The planning for this
+   started around November 2012. Layers refactoring created a compositor
+   API that abstracted away the differences between the D3D vs OpenGL.
+   The main piece of API is DrawQuad.
+
+-  Tiling v2 (Mar 7 2014 - Bug 963073) Tiling for B2G. This work is
+   mainly porting tiled layers to new textures, implementing
+   double-buffered tiles and implementing a texture client pool, to be
+   used by tiled content clients.
+
+   A large motivation for the pool was the very slow performance of
+   allocating tiles because of the sync messages to the compositor.
+
+   The slow performance of allocating was directly addressed by bug 959089
+   which allowed us to allocate gralloc buffers without sync messages to
+   the compositor thread.
+
+-  B2G WebGL performance (May 2014 - Bug 1006957, 1001417, 1024144) This
+   work improved the synchronization mechanism between the compositor
+   and the producer.
new file mode 100644
--- /dev/null
+++ b/gfx/docs/Silk.rst
@@ -0,0 +1,469 @@
+Silk Overview
+==========================
+
+.. image:: SilkArchitecture.png
+
+Architecture
+------------
+
+Our current architecture is to align three components to hardware vsync
+timers:
+
+1. Compositor
+2. RefreshDriver / Painting
+3. Input Events
+
+The flow of our rendering engine is as follows:
+
+1. Hardware Vsync event occurs on an OS specific *Hardware Vsync Thread*
+   on a per monitor basis.
+2. The *Hardware Vsync Thread* attached to the monitor notifies the
+   ``CompositorVsyncDispatchers`` and ``RefreshTimerVsyncDispatcher``.
+3. For every Firefox window on the specific monitor, notify a
+   ``CompositorVsyncDispatcher``. The ``CompositorVsyncDispatcher`` is
+   specific to one window.
+4. The ``CompositorVsyncDispatcher`` notifies a
+   ``CompositorWidgetVsyncObserver`` when remote compositing, or a
+   ``CompositorVsyncScheduler::Observer`` when compositing in-process.
+5. If remote compositing, a vsync notification is sent from the
+   ``CompositorWidgetVsyncObserver`` to the ``VsyncBridgeChild`` on the
+   UI process, which sends an IPDL message to the ``VsyncBridgeParent``
+   on the compositor thread of the GPU process, which then dispatches to
+   ``CompositorVsyncScheduler::Observer``.
+6. The ``RefreshTimerVsyncDispatcher`` notifies the Chrome
+   ``RefreshTimer`` that a vsync has occurred.
+7. The ``RefreshTimerVsyncDispatcher`` sends IPC messages to all content
+   processes to tick their respective active ``RefreshTimer``.
+8. The ``Compositor`` dispatches input events on the *Compositor
+   Thread*, then composites. Input events are only dispatched on the
+   *Compositor Thread* on b2g.
+9. The ``RefreshDriver`` paints on the *Main Thread*.
+
+Hardware Vsync
+--------------
+
+Hardware vsync events from (1), occur on a specific ``Display`` Object.
+The ``Display`` object is responsible for enabling / disabling vsync on
+a per connected display basis. For example, if two monitors are
+connected, two ``Display`` objects will be created, each listening to
+vsync events for their respective displays. We require one ``Display``
+object per monitor as each monitor may have different vsync rates. As a
+fallback solution, we have one global ``Display`` object that can
+synchronize across all connected displays. The global ``Display`` is
+useful if a window is positioned halfway between the two monitors. Each
+platform will have to implement a specific ``Display`` object to hook
+and listen to vsync events. As of this writing, both Firefox OS and OS X
+create their own hardware specific *Hardware Vsync Thread* that executes
+after a vsync has occurred. OS X creates one *Hardware Vsync Thread* per
+``CVDisplayLinkRef``. We do not currently support multiple displays, so
+we use one global ``CVDisplayLinkRef`` that works across all active
+displays. On Windows, we have to create a new platform ``thread`` that
+waits for DwmFlush(), which works across all active displays. Once the
+thread wakes up from DwmFlush(), the actual vsync timestamp is retrieved
+from DwmGetCompositionTimingInfo(), which is the timestamp that is
+actually passed into the compositor and refresh driver.
+
+When a vsync occurs on a ``Display``, the *Hardware Vsync Thread*
+callback fetches all ``CompositorVsyncDispatchers`` associated with the
+``Display``. Each ``CompositorVsyncDispatcher`` is notified that a vsync
+has occurred with the vsync’s timestamp. It is the responsibility of the
+``CompositorVsyncDispatcher`` to notify the ``Compositor`` that is
+awaiting vsync notifications. The ``Display`` will then notify the
+associated ``RefreshTimerVsyncDispatcher``, which should notify all
+active ``RefreshDrivers`` to tick.
+
+All ``Display`` objects are encapsulated in a ``VsyncSource`` object.
+The ``VsyncSource`` object lives in ``gfxPlatform`` and is instantiated
+only on the parent process when ``gfxPlatform`` is created. The
+``VsyncSource`` is destroyed when ``gfxPlatform`` is destroyed. There is
+only one ``VsyncSource`` object throughout the entire lifetime of
+Firefox. Each platform is expected to implement their own
+``VsyncSource`` to manage vsync events. On Firefox OS, this is through
+the ``HwcComposer2D``. On OS X, this is through ``CVDisplayLinkRef``. On
+Windows, it should be through ``DwmGetCompositionTimingInfo``.
+
+Compositor
+----------
+
+When the ``CompositorVsyncDispatcher`` is notified of the vsync event,
+the ``CompositorVsyncScheduler::Observer`` associated with the
+``CompositorVsyncDispatcher`` begins execution. Since the
+``CompositorVsyncDispatcher`` executes on the *Hardware Vsync Thread*
+and the ``Compositor`` composites on the ``CompositorThread``, the
+``CompositorVsyncScheduler::Observer`` posts a task to the
+``CompositorThread``. The ``CompositorBridgeParent`` then composites.
+The model where the ``CompositorVsyncDispatcher`` notifies components on
+the *Hardware Vsync Thread*, and the component schedules the task on the
+appropriate thread is used everywhere.
+
+The ``CompositorVsyncScheduler::Observer`` listens to vsync events as
+needed and stops listening to vsync when composites are no longer
+scheduled or required. Every ``CompositorBridgeParent`` is associated
+and tied to one ``CompositorVsyncScheduler::Observer``, which is
+associated with the ``CompositorVsyncDispatcher``. Each
+``CompositorBridgeParent`` is associated with one widget and is created
+when a new platform window or ``nsBaseWidget`` is created. The
+``CompositorBridgeParent``, ``CompositorVsyncDispatcher``,
+``CompositorVsyncScheduler::Observer``, and ``nsBaseWidget`` all have
+the same lifetimes, which are created and destroyed together.
+
+Out-of-process Compositors
+--------------------------
+
+When compositing out-of-process, this model changes slightly. In this
+case there are effectively two observers: a UI process observer
+(``CompositorWidgetVsyncObserver``), and the
+``CompositorVsyncScheduler::Observer`` in the GPU process. There are
+also two dispatchers: the widget dispatcher in the UI process
+(``CompositorVsyncDispatcher``), and the IPDL-based dispatcher in the
+GPU process (``CompositorBridgeParent::NotifyVsync``). The UI process
+observer and the GPU process dispatcher are linked via an IPDL protocol
+called PVsyncBridge. ``PVsyncBridge`` is a top-level protocol for
+sending vsync notifications to the compositor thread in the GPU process.
+The compositor controls vsync observation through a separate actor,
+``PCompositorWidget``, which (as a subactor for
+``CompositorBridgeChild``) links the compositor thread in the GPU
+process to the main thread in the UI process.
+
+Out-of-process compositors do not go through
+``CompositorVsyncDispatcher`` directly. Instead, the
+``CompositorWidgetDelegate`` in the UI process creates one, and gives it
+a ``CompositorWidgetVsyncObserver``. This observer forwards
+notifications to a Vsync I/O thread, where ``VsyncBridgeChild`` then
+forwards the notification again to the compositor thread in the GPU
+process. The notification is received by a ``VsyncBridgeParent``. The
+GPU process uses the layers ID in the notification to find the correct
+compositor to dispatch the notification to.
+
+CompositorVsyncDispatcher
+-------------------------
+
+The ``CompositorVsyncDispatcher`` executes on the *Hardware Vsync
+Thread*. It contains references to the ``nsBaseWidget`` it is associated
+with and has a lifetime equal to the ``nsBaseWidget``. The
+``CompositorVsyncDispatcher`` is responsible for notifying the
+``CompositorBridgeParent`` that a vsync event has occurred. There can be
+multiple ``CompositorVsyncDispatchers`` per ``Display``, one
+``CompositorVsyncDispatcher`` per window. The only responsibility of the
+``CompositorVsyncDispatcher`` is to notify components when a vsync event
+has occurred, and to stop listening to vsync when no components require
+vsync events. We require one ``CompositorVsyncDispatcher`` per window so
+that we can handle multiple ``Displays``. When compositing in-process,
+the ``CompositorVsyncDispatcher`` is attached to the CompositorWidget
+for the window. When out-of-process, it is attached to the
+CompositorWidgetDelegate, which forwards observer notifications over
+IPDL. In the latter case, its lifetime is tied to a CompositorSession
+rather than the nsIWidget.
+
+Multiple Displays
+-----------------
+
+The ``VsyncSource`` has an API to switch a ``CompositorVsyncDispatcher``
+from one ``Display`` to another ``Display``. For example, when one
+window either goes into full screen mode or moves from one connected
+monitor to another. When one window moves to another monitor, we expect
+a platform specific notification to occur. The detection of when a
+window enters full screen mode or moves is not covered by Silk itself,
+but the framework is built to support this use case. The expected flow
+is that the OS notification occurs on ``nsIWidget``, which retrieves the
+associated ``CompositorVsyncDispatcher``. The
+``CompositorVsyncDispatcher`` then notifies the ``VsyncSource`` to
+switch to the correct ``Display`` the ``CompositorVsyncDispatcher`` is
+connected to. Because the notification works through the ``nsIWidget``,
+the actual switching of the ``CompositorVsyncDispatcher`` to the correct
+``Display`` should occur on the *Main Thread*. The current
+implementation of Silk does not handle this case and needs to be built
+out.
+
+CompositorVsyncScheduler::Observer
+----------------------------------
+
+The ``CompositorVsyncScheduler::Observer`` handles the vsync
+notifications and interactions with the ``CompositorVsyncDispatcher``.
+When the ``Compositor`` requires a scheduled composite, it notifies the
+``CompositorVsyncScheduler::Observer`` that it needs to listen to vsync.
+The ``CompositorVsyncScheduler::Observer`` then observes / unobserves
+vsync as needed from the ``CompositorVsyncDispatcher`` to enable
+composites.
+
+GeckoTouchDispatcher
+--------------------
+
+The ``GeckoTouchDispatcher`` is a singleton that resamples touch events
+to smooth out jank while tracking a user’s finger. Because input and
+composite are linked together, the
+``CompositorVsyncScheduler::Observer`` has a reference to the
+``GeckoTouchDispatcher`` and vice versa.
+
+Input Events
+------------
+
+One large goal of Silk is to align touch events with vsync events. On
+Firefox OS, touchscreens often have different touch scan rates than the
+display refreshes. A Flame device has a touch refresh rate of 75 HZ,
+while a Nexus 4 has a touch refresh rate of 100 HZ, while the device’s
+display refresh rate is 60HZ. When a vsync event occurs, we resample
+touch events, and then dispatch the resampled touch event to APZ. Touch
+events on Firefox OS occur on a *Touch Input Thread* whereas they are
+processed by APZ on the *APZ Controller Thread*. We use `Google
+Android’s touch
+resampling <http://www.masonchang.com/blog/2014/8/25/androids-touch-resampling-algorithm>`__
+algorithm to resample touch events.
+
+Currently, we have a strict ordering between Composites and touch
+events. When a touch event occurs on the *Touch Input Thread*, we store
+the touch event in a queue. When a vsync event occurs, the
+``CompositorVsyncDispatcher`` notifies the ``Compositor`` of a vsync
+event, which notifies the ``GeckoTouchDispatcher``. The
+``GeckoTouchDispatcher`` processes the touch event first on the *APZ
+Controller Thread*, which is the same as the *Compositor Thread* on b2g,
+then the ``Compositor`` finishes compositing. We require this strict
+ordering because if a vsync notification is dispatched to both the
+``Compositor`` and ``GeckoTouchDispatcher`` at the same time, a race
+condition occurs between processing the touch event and therefore
+position versus compositing. In practice, this creates very janky
+scrolling. As of this writing, we have not analyzed input events on
+desktop platforms.
+
+One slight quirk is that input events can start a composite, for example
+during a scroll and after the ``Compositor`` is no longer listening to
+vsync events. In these cases, we notify the ``Compositor`` to observe
+vsync so that it dispatches touch events. If touch events were not
+dispatched, and since the ``Compositor`` is not listening to vsync
+events, the touch events would never be dispatched. The
+``GeckoTouchDispatcher`` handles this case by always forcing the
+``Compositor`` to listen to vsync events while touch events are
+occurring.
+
+Widget, Compositor, CompositorVsyncDispatcher, GeckoTouchDispatcher Shutdown Procedure
+--------------------------------------------------------------------------------------
+
+When the `nsBaseWidget shuts
+down <https://hg.mozilla.org/mozilla-central/file/0df249a0e4d3/widget/nsBaseWidget.cpp#l182>`__
+- It calls nsBaseWidget::DestroyCompositor on the *Gecko Main Thread*.
+During nsBaseWidget::DestroyCompositor, it first destroys the
+CompositorBridgeChild. CompositorBridgeChild sends a sync IPC call to
+CompositorBridgeParent::RecvStop, which calls
+`CompositorBridgeParent::Destroy <https://hg.mozilla.org/mozilla-central/file/ab0490972e1e/gfx/layers/ipc/CompositorBridgeParent.cpp#l509>`__.
+During this time, the *main thread* is blocked on the parent process.
+CompositorBridgeParent::RecvStop runs on the *Compositor thread* and
+cleans up some resources, including setting the
+``CompositorVsyncScheduler::Observer`` to nullptr.
+CompositorBridgeParent::RecvStop also explicitly keeps the
+CompositorBridgeParent alive and posts another task to run
+CompositorBridgeParent::DeferredDestroy on the Compositor loop so that
+all ipdl code can finish executing. The
+``CompositorVsyncScheduler::Observer`` also unobserves from vsync and
+cancels any pending composite tasks. Once
+CompositorBridgeParent::RecvStop finishes, the *main thread* in the
+parent process continues shutting down the nsBaseWidget.
+
+At the same time, the *Compositor thread* is executing tasks until
+CompositorBridgeParent::DeferredDestroy runs, which flushes the
+compositor message loop. Now we have two tasks as both the nsBaseWidget
+releases a reference to the Compositor on the *main thread* during
+destruction and the CompositorBridgeParent::DeferredDestroy releases a
+reference to the CompositorBridgeParent on the *Compositor Thread*.
+Finally, the CompositorBridgeParent itself is destroyed on the *main
+thread* once both references are gone due to explicit `main thread
+destruction <https://hg.mozilla.org/mozilla-central/file/50b95032152c/gfx/layers/ipc/CompositorBridgeParent.h#l148>`__.
+
+With the ``CompositorVsyncScheduler::Observer``, any accesses to the
+widget after nsBaseWidget::DestroyCompositor executes are invalid. Any
+accesses to the compositor between the time the
+nsBaseWidget::DestroyCompositor runs and the
+CompositorVsyncScheduler::Observer’s destructor runs aren’t safe, yet a
+hardware vsync event could occur between these times. Since any tasks
+posted on the Compositor loop after
+CompositorBridgeParent::DeferredDestroy is posted are invalid, we make
+sure that no vsync tasks can be posted once
+CompositorBridgeParent::RecvStop executes and DeferredDestroy is posted
+on the Compositor thread. When the sync call to
+CompositorBridgeParent::RecvStop executes, we explicitly set the
+CompositorVsyncScheduler::Observer to null to prevent vsync
+notifications from occurring. If vsync notifications were allowed to
+occur, since the ``CompositorVsyncScheduler::Observer``\ ’s vsync
+notification executes on the *hardware vsync thread*, it would post a
+task to the Compositor loop and may execute after
+CompositorBridgeParent::DeferredDestroy. Thus, we explicitly shut down
+vsync events in the ``CompositorVsyncDispatcher`` and
+``CompositorVsyncScheduler::Observer`` during nsBaseWidget::Shutdown to
+prevent any vsync tasks from executing after
+CompositorBridgeParent::DeferredDestroy.
+
+The ``CompositorVsyncDispatcher`` may be destroyed on either the *main
+thread* or *Compositor Thread*, since both the nsBaseWidget and
+``CompositorVsyncScheduler::Observer`` race to destroy on different
+threads. nsBaseWidget is destroyed on the *main thread* and releases a
+reference to the ``CompositorVsyncDispatcher`` during destruction. The
+``CompositorVsyncScheduler::Observer`` has a race to be destroyed either
+during CompositorBridgeParent shutdown or from the
+``GeckoTouchDispatcher`` which is destroyed on the main thread with
+`ClearOnShutdown <https://hg.mozilla.org/mozilla-central/file/21567e9a6e40/xpcom/base/ClearOnShutdown.h#l15>`__.
+Whichever object, the CompositorBridgeParent or the
+``GeckoTouchDispatcher`` is destroyed last will hold the last reference
+to the ``CompositorVsyncDispatcher``, which destroys the object.
+
+Refresh Driver
+--------------
+
+The Refresh Driver is ticked from a `single active
+timer <https://hg.mozilla.org/mozilla-central/file/ab0490972e1e/layout/base/nsRefreshDriver.cpp#l11>`__.
+The assumption is that there are multiple ``RefreshDrivers`` connected
+to a single ``RefreshTimer``. There are two ``RefreshTimers``: an active
+and an inactive ``RefreshTimer``. Each Tab has its own
+``RefreshDriver``, which connects to one of the global
+``RefreshTimers``. The ``RefreshTimers`` execute on the *Main Thread*
+and tick their connected ``RefreshDrivers``. We do not want to break
+this model of multiple ``RefreshDrivers`` per a set of two global
+``RefreshTimers``. Each ``RefreshDriver`` switches between the active
+and inactive ``RefreshTimer``.
+
+Instead, we create a new ``RefreshTimer``, the ``VsyncRefreshTimer``
+which ticks based on vsync messages. We replace the current active timer
+with a ``VsyncRefreshTimer``. All tabs will then tick based on this new
+active timer. Since the ``RefreshTimer`` has a lifetime of the process,
+we only need to create a single ``RefreshTimerVsyncDispatcher`` per
+``Display`` when Firefox starts. Even if we do not have any content
+processes, the Chrome process will still need a ``VsyncRefreshTimer``,
+thus we can associate the ``RefreshTimerVsyncDispatcher`` with each
+``Display``.
+
+When Firefox starts, we initially create a new ``VsyncRefreshTimer`` in
+the Chrome process. The ``VsyncRefreshTimer`` will listen to vsync
+notifications from ``RefreshTimerVsyncDispatcher`` on the global
+``Display``. When nsRefreshDriver::Shutdown executes, it will delete the
+``VsyncRefreshTimer``. This creates a problem as all the
+``RefreshTimers`` are currently manually memory managed whereas
+``VsyncObservers`` are ref counted. To work around this problem, we
+create a new ``RefreshDriverVsyncObserver`` as an inner class to
+``VsyncRefreshTimer``, which actually receives vsync notifications. It
+then ticks the ``RefreshDrivers`` inside ``VsyncRefreshTimer``.
+
+With Content processes, the start up process is more complicated. We
+send vsync IPC messages via the use of the PBackground thread on the
+parent process, which allows us to send messages from the Parent
+process without waiting on the *main thread*. This sends messages from
+the Parent::\ *PBackground Thread* to the Child::\ *Main Thread*. The
+*main thread* receiving IPC messages on the content process is
+acceptable because ``RefreshDrivers`` must execute on the *main thread*.
+However, there is some amount of time required to setup the IPC
+connection upon process creation and during this time, the
+``RefreshDrivers`` must tick to set up the process. To get around this,
+we initially use software ``RefreshTimers`` that already exist during
+content process startup and swap in the ``VsyncRefreshTimer`` once the
+IPC connection is created.
+
+During nsRefreshDriver::ChooseTimer, we create an async PBackground IPC
+open request to create a ``VsyncParent`` and ``VsyncChild``. At the same
+time, we create a software ``RefreshTimer`` and tick the
+``RefreshDrivers`` as normal. Once the PBackground callback is executed
+and an IPC connection exists, we swap all ``RefreshDrivers`` currently
+associated with the active ``RefreshTimer`` and swap the
+``RefreshDrivers`` to use the ``VsyncRefreshTimer``. Since all
+interactions on the content process occur on the main thread, there is
+no need for locks. The ``VsyncParent`` listens to vsync events through
+the ``VsyncRefreshTimerDispatcher`` on the parent side and sends vsync
+IPC messages to the ``VsyncChild``. The ``VsyncChild`` notifies the
+``VsyncRefreshTimer`` on the content process.
+
+During the shutdown process of the content process, ActorDestroy is
+called on the ``VsyncChild`` and ``VsyncParent`` due to the normal
+PBackground shutdown process. Once ActorDestroy is called, no IPC
+messages should be sent across the channel. After ActorDestroy is
+called, the IPDL machinery will delete the **VsyncParent/Child** pair.
+The ``VsyncParent``, due to being a ``VsyncObserver``, is ref counted.
+After ``VsyncParent::ActorDestroy`` is called, it unregisters itself
+from the ``RefreshTimerVsyncDispatcher``, which holds the last reference
+to the ``VsyncParent``, and the object will be deleted.
+
+Thus the overall flow during normal execution is:
+
+1. VsyncSource::Display::RefreshTimerVsyncDispatcher receives a Vsync
+   notification from the OS in the parent process.
+2. RefreshTimerVsyncDispatcher notifies
+   VsyncRefreshTimer::RefreshDriverVsyncObserver that a vsync occurred on
+   the parent process on the hardware vsync thread.
+3. RefreshTimerVsyncDispatcher notifies the VsyncParent on the hardware
+   vsync thread that a vsync occurred.
+4. The VsyncRefreshTimer::RefreshDriverVsyncObserver in the parent
+   process posts a task to the main thread that ticks the refresh
+   drivers.
+5. VsyncParent posts a task to the PBackground thread to send a vsync
+   IPC message to VsyncChild.
+6. VsyncChild receives a vsync notification on the content process on the
+   main thread and ticks their respective RefreshDrivers.
+
+Compressing Vsync Messages
+--------------------------
+
+Vsync messages occur quite often and the *main thread* can be busy for
+long periods of time due to JavaScript. Consistently sending vsync
+messages to the refresh driver timer can flood the *main thread* with
+refresh driver ticks, causing even more delays. To avoid this problem,
+we compress vsync messages on both the parent and child processes.
+
+On the parent process, newer vsync messages update a vsync timestamp but
+do not actually queue any tasks on the *main thread*. Once the parent
+process’ *main thread* executes the refresh driver tick, it uses the
+most updated vsync timestamp to tick the refresh driver. After the
+refresh driver has ticked, one single vsync message is queued for
+another refresh driver tick task. On the content process, the IPDL
+``compress`` keyword automatically compresses IPC messages.
+
+Multiple Monitors
+-----------------
+
+In order to have multiple monitor support for the ``RefreshDrivers``, we
+have multiple active ``RefreshTimers``. Each ``RefreshTimer`` is
+associated with a specific ``Display`` via an id and ticks when its
+respective ``Display`` vsync occurs. We have **N RefreshTimers**, where
+N is the number of connected displays. Each ``RefreshTimer`` still has
+multiple ``RefreshDrivers``.
+
+When a tab or window changes monitors, the ``nsIWidget`` receives a
+display changed notification. Based on which display the window is on,
+the window switches to the correct ``RefreshTimerVsyncDispatcher`` and
+``CompositorVsyncDispatcher`` on the parent process based on the display
+id. Each ``TabParent`` should also send a notification to their child.
+Each ``TabChild``, given the display ID, switches to the correct
+``RefreshTimer`` associated with the display ID. When each display vsync
+occurs, it sends one IPC message to notify vsync. The vsync message
+contains a display ID, to tick the appropriate ``RefreshTimer`` on the
+content process. There is still only one **VsyncParent/VsyncChild**
+pair, just each vsync notification will include a display ID, which maps
+to the correct ``RefreshTimer``.
+
+Object Lifetime
+---------------
+
+1. CompositorVsyncDispatcher - Lives as long as the nsBaseWidget
+   associated with the VsyncDispatcher
+2. CompositorVsyncScheduler::Observer - Lives and dies the same time as
+   the CompositorBridgeParent.
+3. RefreshTimerVsyncDispatcher - As long as the associated display
+   object, which is the lifetime of Firefox.
+4. VsyncSource - Lives as long as the gfxPlatform on the chrome process,
+   which is the lifetime of Firefox.
+5. VsyncParent/VsyncChild - Lives as long as the content process
+6. RefreshTimer - Lives as long as the process
+
+Threads
+-------
+
+All ``VsyncObservers`` are notified on the *Hardware Vsync Thread*. It
+is the responsibility of the ``VsyncObservers`` to post tasks to their
+respective correct thread. For example, the
+``CompositorVsyncScheduler::Observer`` will be notified on the *Hardware
+Vsync Thread*, and post a task to the *Compositor Thread* to do the
+actual composition.
+
+1. Compositor Thread - Nothing changes
+2. Main Thread - PVsyncChild receives IPC messages on the main thread.
+   We also enable/disable vsync on the main thread.
+3. PBackground Thread - Creates a connection from the PBackground thread
+   on the parent process to the main thread in the content process.
+4. Hardware Vsync Thread - Every platform is different, but we always
+   have the concept of a hardware vsync thread. Sometimes this is
+   actually created by the host OS. On Windows, we have to create a
+   separate platform thread that blocks on DwmFlush().
rename from gfx/doc/silkArchitecture.png
rename to gfx/docs/SilkArchitecture.png
--- a/gfx/docs/index.rst
+++ b/gfx/docs/index.rst
@@ -1,9 +1,17 @@
-========
 Graphics
 ========
 
-The graphics team's documentation is currently using doxygen. We're tracking the work to integrate it better at https://bugzilla.mozilla.org/show_bug.cgi?id=1150232.
+This collection of linked pages contains design documents for the
+Mozilla graphics architecture. The design documents live in the gfx/docs directory.
+
+This `wiki page <https://wiki.mozilla.org/Platform/GFX>`__ contains
+information about graphics and the graphics team at Mozilla.
 
-For now you can read the graphics source code documentation here:
+.. toctree::
+   :maxdepth: 1
 
-http://people.mozilla.org/~bgirard/doxygen/gfx/
+   GraphicsOverview
+   LayersHistory
+   AsyncPanZoom
+   AdvancedLayers
+   Silk
--- a/gfx/gl/SharedSurfaceEGL.cpp
+++ b/gfx/gl/SharedSurfaceEGL.cpp
@@ -263,17 +263,16 @@ SharedSurface_SurfaceTexture::Commit()
     mGL->SwapBuffers();
     UnlockProdImpl();
     mSurface->SetAvailable(false);
 }
 
 void
 SharedSurface_SurfaceTexture::WaitForBufferOwnership()
 {
-    MOZ_RELEASE_ASSERT(!mSurface->GetAvailable());
     mSurface->SetAvailable(true);
 }
 
 bool
 SharedSurface_SurfaceTexture::IsBufferAvailable() const {
     return mSurface->GetAvailable();
 }
 
--- a/gfx/layers/apz/src/AsyncPanZoomController.cpp
+++ b/gfx/layers/apz/src/AsyncPanZoomController.cpp
@@ -3748,27 +3748,30 @@ bool AsyncPanZoomController::AdvanceAnim
   // responsibility to schedule a composite.
   mAsyncTransformAppliedToContent = false;
   bool requestAnimationFrame = false;
   nsTArray<RefPtr<Runnable>> deferredTasks;
 
   {
     RecursiveMutexAutoLock lock(mRecursiveMutex);
 
-    requestAnimationFrame = UpdateAnimation(aSampleTime, &deferredTasks);
-
     { // scope lock
       MutexAutoLock lock(mCheckerboardEventLock);
+      // Update RendertraceProperty before UpdateAnimation() call, since
+      // the UpdateAnimation() updates effective ScrollOffset for next frame
+      // if APZFrameDelay is enabled.
       if (mCheckerboardEvent) {
         mCheckerboardEvent->UpdateRendertraceProperty(
             CheckerboardEvent::UserVisible,
-            CSSRect(Metrics().GetScrollOffset(),
+            CSSRect(GetEffectiveScrollOffset(AsyncPanZoomController::eForCompositing),
                     Metrics().CalculateCompositedSizeInCssPixels()));
       }
     }
+
+    requestAnimationFrame = UpdateAnimation(aSampleTime, &deferredTasks);
   }
 
   // Execute any deferred tasks queued up by mAnimation's Sample() (called by
   // UpdateAnimation()). This needs to be done after the monitor is released
   // since the tasks are allowed to call APZCTreeManager methods which can grab
   // the tree lock.
   for (uint32_t i = 0; i < deferredTasks.Length(); ++i) {
     APZThreadUtils::RunOnControllerThread(deferredTasks[i].forget());
@@ -4023,17 +4026,17 @@ Matrix4x4 AsyncPanZoomController::GetTra
            PostScale(zoomChange.width, zoomChange.height, 1);
 }
 
 uint32_t
 AsyncPanZoomController::GetCheckerboardMagnitude() const
 {
   RecursiveMutexAutoLock lock(mRecursiveMutex);
 
-  CSSPoint currentScrollOffset = Metrics().GetScrollOffset() + mTestAsyncScrollOffset;
+  CSSPoint currentScrollOffset = GetEffectiveScrollOffset(AsyncPanZoomController::eForCompositing) + mTestAsyncScrollOffset;
   CSSRect painted = mLastContentPaintMetrics.GetDisplayPort() + mLastContentPaintMetrics.GetScrollOffset();
   CSSRect visible = CSSRect(currentScrollOffset, Metrics().CalculateCompositedSizeInCssPixels());
 
   CSSIntRegion checkerboard;
   // Round so as to minimize checkerboarding; if we're only showing fractional
   // pixels of checkerboarding it's not really worth counting
   checkerboard.Sub(RoundedIn(visible), RoundedOut(painted));
   return checkerboard.Area();
--- a/gfx/webrender/src/display_list_flattener.rs
+++ b/gfx/webrender/src/display_list_flattener.rs
@@ -1041,16 +1041,46 @@ impl<'a> DisplayListFlattener<'a> {
                 .rev()
                 .find(|sc| sc.rendering_context_3d_prim_index.is_some())
                 .map(|sc| sc.rendering_context_3d_prim_index.unwrap())
                 .unwrap()
         } else {
             *self.picture_stack.last().unwrap()
         };
 
+        // Same for mix-blend-mode.
+        if let Some(mix_blend_mode) = composite_ops.mix_blend_mode {
+            let picture = PicturePrimitive::new_image(
+                self.get_next_picture_id(),
+                Some(PictureCompositeMode::MixBlend(mix_blend_mode)),
+                false,
+                pipeline_id,
+                None,
+                true,
+            );
+
+            let src_prim = BrushPrimitive::new_picture(picture);
+
+            let src_prim_index = self.prim_store.add_primitive(
+                &LayoutRect::zero(),
+                &max_clip,
+                true,
+                clip_chain_id,
+                spatial_node_index,
+                None,
+                PrimitiveContainer::Brush(src_prim),
+            );
+
+            let parent_pic = self.prim_store.get_pic_mut(parent_prim_index);
+            parent_prim_index = src_prim_index;
+            parent_pic.add_primitive(src_prim_index);
+
+            self.picture_stack.push(src_prim_index);
+        }
+
         // For each filter, create a new image with that composite mode.
         for filter in composite_ops.filters.iter().rev() {
             let picture = PicturePrimitive::new_image(
                 self.get_next_picture_id(),
                 Some(PictureCompositeMode::Filter(*filter)),
                 false,
                 pipeline_id,
                 None,
@@ -1071,46 +1101,16 @@ impl<'a> DisplayListFlattener<'a> {
             let parent_pic = self.prim_store.get_pic_mut(parent_prim_index);
             parent_prim_index = src_prim_index;
 
             parent_pic.add_primitive(src_prim_index);
 
             self.picture_stack.push(src_prim_index);
         }
 
-        // Same for mix-blend-mode.
-        if let Some(mix_blend_mode) = composite_ops.mix_blend_mode {
-            let picture = PicturePrimitive::new_image(
-                self.get_next_picture_id(),
-                Some(PictureCompositeMode::MixBlend(mix_blend_mode)),
-                false,
-                pipeline_id,
-                None,
-                true,
-            );
-
-            let src_prim = BrushPrimitive::new_picture(picture);
-
-            let src_prim_index = self.prim_store.add_primitive(
-                &LayoutRect::zero(),
-                &max_clip,
-                true,
-                clip_chain_id,
-                spatial_node_index,
-                None,
-                PrimitiveContainer::Brush(src_prim),
-            );
-
-            let parent_pic = self.prim_store.get_pic_mut(parent_prim_index);
-            parent_prim_index = src_prim_index;
-            parent_pic.add_primitive(src_prim_index);
-
-            self.picture_stack.push(src_prim_index);
-        }
-
         // By default, this picture will be collapsed into
         // the owning target.
         let mut composite_mode = None;
         let mut frame_output_pipeline_id = None;
 
         // If this stacking context if the root of a pipeline, and the caller
         // has requested it as an output frame, create a render task to isolate it.
         if is_pipeline_root && self.output_pipelines.contains(&pipeline_id) {
rename from gfx/doc/README.webrender
rename to gfx/webrender_bindings/README.webrender
--- a/gfx/webrender_bindings/revision.txt
+++ b/gfx/webrender_bindings/revision.txt
@@ -1,1 +1,1 @@
-6415faa7f6acd412d4cc6b745cf802204690611c
+3fa5eb8aaa0172306bfdc5e87d1d0c9af39d103a
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -642,17 +642,19 @@ InitFromBailout(JSContext* cx, size_t fr
     // +---------------+
     // |    StackS     |
     // +---------------+  --- IF NOT LAST INLINE FRAME,
     // |  Descr(BLJS)  |  --- CALLING INFO STARTS HERE
     // +---------------+
     // |  ReturnAddr   | <-- return into main jitcode after IC
     // +===============+
 
-    JitSpew(JitSpew_BaselineBailouts, "      Unpacking %s:%u", script->filename(), script->lineno());
+    JitSpew(JitSpew_BaselineBailouts, "      Unpacking %s:%u:%u", script->filename(),
+                                                                  script->lineno(), 
+                                                                  script->column());
     JitSpew(JitSpew_BaselineBailouts, "      [BASELINE-JS FRAME]");
 
     // Calculate and write the previous frame pointer value.
     // Record the virtual stack offset at this location.  Later on, if we end up
     // writing out a BaselineStub frame for the next callee, we'll need to save the
     // address.
     void* prevFramePtr = builder.calculatePrevFramePtr();
     if (!builder.writePtr(prevFramePtr, "PrevFramePtr"))
@@ -1040,19 +1042,19 @@ InitFromBailout(JSContext* cx, size_t fr
                 // arguments in the slots and not be 4.
                 MOZ_ASSERT(exprStackSlots == expectedDepth);
             }
         }
     }
 #endif
 
 #ifdef JS_JITSPEW
-    JitSpew(JitSpew_BaselineBailouts, "      Resuming %s pc offset %d (op %s) (line %d) of %s:%u",
+    JitSpew(JitSpew_BaselineBailouts, "      Resuming %s pc offset %d (op %s) (line %d) of %s:%u:%u",
                 resumeAfter ? "after" : "at", (int) pcOff, CodeName[op],
-                PCToLineNumber(script, pc), script->filename(), script->lineno());
+                PCToLineNumber(script, pc), script->filename(), script->lineno(), script->column());
     JitSpew(JitSpew_BaselineBailouts, "      Bailout kind: %s",
             BailoutKindString(bailoutKind));
 #endif
 
     bool pushedNewTarget = IsConstructorCallPC(pc);
 
     // If this was the last inline frame, or we are bailing out to a catch or
     // finally block in this frame, then unpacking is almost done.
@@ -1550,18 +1552,18 @@ jit::BailoutIonToBaseline(JSContext* cx,
     //      +---------------+
     //      |  ReturnAddr   |
     //      +---------------+
     //      |    |||||      | <---- Overwrite starting here.
     //      |    |||||      |
     //      |    |||||      |
     //      +---------------+
 
-    JitSpew(JitSpew_BaselineBailouts, "Bailing to baseline %s:%u (IonScript=%p) (FrameType=%d)",
-            iter.script()->filename(), iter.script()->lineno(), (void*) iter.ionScript(),
+    JitSpew(JitSpew_BaselineBailouts, "Bailing to baseline %s:%u:%u (IonScript=%p) (FrameType=%d)",
+            iter.script()->filename(), iter.script()->lineno(), iter.script()->column(), (void*) iter.ionScript(),
             (int) prevFrameType);
 
     bool catchingException;
     bool propagatingExceptionForDebugMode;
     if (excInfo) {
         catchingException = excInfo->catchingException();
         propagatingExceptionForDebugMode = excInfo->propagatingIonExceptionForDebugMode();
 
@@ -1606,18 +1608,18 @@ jit::BailoutIonToBaseline(JSContext* cx,
 
 #ifdef TRACK_SNAPSHOTS
     snapIter.spewBailingFrom();
 #endif
 
     RootedFunction callee(cx, iter.maybeCallee());
     RootedScript scr(cx, iter.script());
     if (callee) {
-        JitSpew(JitSpew_BaselineBailouts, "  Callee function (%s:%u)",
-                scr->filename(), scr->lineno());
+        JitSpew(JitSpew_BaselineBailouts, "  Callee function (%s:%u:%u)",
+                scr->filename(), scr->lineno(), scr->column());
     } else {
         JitSpew(JitSpew_BaselineBailouts, "  No callee!");
     }
 
     if (iter.isConstructing())
         JitSpew(JitSpew_BaselineBailouts, "  Constructing!");
     else
         JitSpew(JitSpew_BaselineBailouts, "  Not constructing!");
@@ -1732,59 +1734,59 @@ InvalidateAfterBailout(JSContext* cx, Ha
 
     JitSpew(JitSpew_BaselineBailouts, "Invalidating due to %s", reason);
     Invalidate(cx, outerScript);
 }
 
 static void
 HandleBoundsCheckFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript)
 {
-    JitSpew(JitSpew_IonBailouts, "Bounds check failure %s:%u, inlined into %s:%u",
-            innerScript->filename(), innerScript->lineno(),
-            outerScript->filename(), outerScript->lineno());
+    JitSpew(JitSpew_IonBailouts, "Bounds check failure %s:%u:%u, inlined into %s:%u:%u",
+            innerScript->filename(), innerScript->lineno(), innerScript->column(),
+            outerScript->filename(), outerScript->lineno(), outerScript->column());
 
     if (!innerScript->failedBoundsCheck())
         innerScript->setFailedBoundsCheck();
 
     InvalidateAfterBailout(cx, outerScript, "bounds check failure");
     if (innerScript->hasIonScript())
         Invalidate(cx, innerScript);
 }
 
 static void
 HandleShapeGuardFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript)
 {
-    JitSpew(JitSpew_IonBailouts, "Shape guard failure %s:%u, inlined into %s:%u",
-            innerScript->filename(), innerScript->lineno(),
-            outerScript->filename(), outerScript->lineno());
+    JitSpew(JitSpew_IonBailouts, "Shape guard failure %s:%u:%u, inlined into %s:%u:%u",
+            innerScript->filename(), innerScript->lineno(), innerScript->column(),
+            outerScript->filename(), outerScript->lineno(), outerScript->column());
 
     // TODO: Currently this mimic's Ion's handling of this case.  Investigate setting
     // the flag on innerScript as opposed to outerScript, and maybe invalidating both
     // inner and outer scripts, instead of just the outer one.
     outerScript->setFailedShapeGuard();
 
     InvalidateAfterBailout(cx, outerScript, "shape guard failure");
 }
 
 static void
 HandleBaselineInfoBailout(JSContext* cx, HandleScript outerScript, HandleScript innerScript)
 {
-    JitSpew(JitSpew_IonBailouts, "Baseline info failure %s:%u, inlined into %s:%u",
-            innerScript->filename(), innerScript->lineno(),
-            outerScript->filename(), outerScript->lineno());
+    JitSpew(JitSpew_IonBailouts, "Baseline info failure %s:%u:%u, inlined into %s:%u:%u",
+            innerScript->filename(), innerScript->lineno(), innerScript->column(),
+            outerScript->filename(), outerScript->lineno(), outerScript->column());
 
     InvalidateAfterBailout(cx, outerScript, "invalid baseline info");
 }
 
 static void
 HandleLexicalCheckFailure(JSContext* cx, HandleScript outerScript, HandleScript innerScript)
 {
-    JitSpew(JitSpew_IonBailouts, "Lexical check failure %s:%u, inlined into %s:%u",
-            innerScript->filename(), innerScript->lineno(),
-            outerScript->filename(), outerScript->lineno());
+    JitSpew(JitSpew_IonBailouts, "Lexical check failure %s:%u:%u, inlined into %s:%u:%u",
+            innerScript->filename(), innerScript->lineno(), innerScript->column(),
+            outerScript->filename(), outerScript->lineno(), outerScript->column());
 
     if (!innerScript->failedLexicalCheck())
         innerScript->setFailedLexicalCheck();
 
     InvalidateAfterBailout(cx, outerScript, "lexical check failure");
     if (innerScript->hasIonScript())
         Invalidate(cx, innerScript);
 }
@@ -1988,19 +1990,19 @@ jit::FinishBailoutToBaseline(BaselineBai
     // If we are catching an exception, we need to unwind scopes.
     // See |SettleOnTryNote|
     if (cx->isExceptionPending() && faultPC) {
         EnvironmentIter ei(cx, topFrame, faultPC);
         UnwindEnvironment(cx, ei, tryPC);
     }
 
     JitSpew(JitSpew_BaselineBailouts,
-            "  Restored outerScript=(%s:%u,%u) innerScript=(%s:%u,%u) (bailoutKind=%u)",
-            outerScript->filename(), outerScript->lineno(), outerScript->getWarmUpCount(),
-            innerScript->filename(), innerScript->lineno(), innerScript->getWarmUpCount(),
+            "  Restored outerScript=(%s:%u:%u,%u) innerScript=(%s:%u:%u,%u) (bailoutKind=%u)",
+            outerScript->filename(), outerScript->lineno(), outerScript->column(), outerScript->getWarmUpCount(),
+            innerScript->filename(), innerScript->lineno(), innerScript->column(), innerScript->getWarmUpCount(),
             (unsigned) bailoutKind);
 
     switch (bailoutKind) {
       // Normal bailouts.
       case Bailout_Inevitable:
       case Bailout_DuringVMCall:
       case Bailout_NonJSFunctionCallee:
       case Bailout_DynamicNameNotFound:
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -81,21 +81,21 @@ BaselineCompiler::addPCMappingEntry(bool
     entry.addIndexEntry = addIndexEntry;
 
     return pcMappingEntries_.append(entry);
 }
 
 MethodStatus
 BaselineCompiler::compile()
 {
-    JitSpew(JitSpew_BaselineScripts, "Baseline compiling script %s:%u (%p)",
-            script->filename(), script->lineno(), script);
-
-    JitSpew(JitSpew_Codegen, "# Emitting baseline code for script %s:%u",
-            script->filename(), script->lineno());
+    JitSpew(JitSpew_BaselineScripts, "Baseline compiling script %s:%u:%u (%p)",
+            script->filename(), script->lineno(), script->column(), script);
+
+    JitSpew(JitSpew_Codegen, "# Emitting baseline code for script %s:%u:%u",
+            script->filename(), script->lineno(), script->column());
 
     TraceLoggerThread* logger = TraceLoggerForCurrentThread(cx);
     TraceLoggerEvent scriptEvent(TraceLogger_AnnotateScripts, script);
     AutoTraceLog logScript(logger, scriptEvent);
     AutoTraceLog logCompile(logger, TraceLogger_BaselineCompilation);
 
     AutoKeepTypeScripts keepTypes(cx);
     if (!script->ensureHasTypes(cx, keepTypes) || !script->ensureHasAnalyzedArgsUsage(cx))
@@ -217,19 +217,19 @@ BaselineCompiler::compile()
     if (!baselineScript) {
         ReportOutOfMemory(cx);
         return Method_Error;
     }
 
     baselineScript->setMethod(code);
     baselineScript->setTemplateEnvironment(templateEnv);
 
-    JitSpew(JitSpew_BaselineScripts, "Created BaselineScript %p (raw %p) for %s:%u",
+    JitSpew(JitSpew_BaselineScripts, "Created BaselineScript %p (raw %p) for %s:%u:%u",
             (void*) baselineScript.get(), (void*) code->raw(),
-            script->filename(), script->lineno());
+            script->filename(), script->lineno(), script->column());
 
     MOZ_ASSERT(pcMappingIndexEntries.length() > 0);
     baselineScript->copyPCMappingIndexEntries(&pcMappingIndexEntries[0]);
 
     MOZ_ASSERT(pcEntries.length() > 0);
     baselineScript->copyPCMappingEntries(pcEntries);
 
     // Copy IC entries
@@ -273,18 +273,18 @@ BaselineCompiler::compile()
     baselineScript->copyYieldAndAwaitEntries(script, yieldAndAwaitOffsets_);
 
     if (compileDebugInstrumentation_)
         baselineScript->setHasDebugInstrumentation();
 
     // Always register a native => bytecode mapping entry, since profiler can be
     // turned on with baseline jitcode on stack, and baseline jitcode cannot be invalidated.
     {
-        JitSpew(JitSpew_Profiling, "Added JitcodeGlobalEntry for baseline script %s:%u (%p)",
-                    script->filename(), script->lineno(), baselineScript.get());
+        JitSpew(JitSpew_Profiling, "Added JitcodeGlobalEntry for baseline script %s:%u:%u (%p)",
+                    script->filename(), script->lineno(), script->column(), baselineScript.get());
 
         // Generate profiling string.
         char* str = JitcodeGlobalEntry::createScriptString(cx, script);
         if (!str)
             return Method_Error;
 
         JitcodeGlobalEntry::BaselineEntry entry;
         entry.init(code, code->raw(), code->rawEnd(), script, str);
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -318,29 +318,29 @@ ICEntryKindToString(ICEntry::Kind kind)
 }
 #endif // JS_JITSPEW
 
 static void
 SpewPatchBaselineFrame(uint8_t* oldReturnAddress, uint8_t* newReturnAddress,
                        JSScript* script, ICEntry::Kind frameKind, jsbytecode* pc)
 {
     JitSpew(JitSpew_BaselineDebugModeOSR,
-            "Patch return %p -> %p on BaselineJS frame (%s:%u) from %s at %s",
+            "Patch return %p -> %p on BaselineJS frame (%s:%u:%u) from %s at %s",
             oldReturnAddress, newReturnAddress, script->filename(), script->lineno(),
-            ICEntryKindToString(frameKind), CodeName[(JSOp)*pc]);
+            script->column(), ICEntryKindToString(frameKind), CodeName[(JSOp)*pc]);
 }
 
 static void
 SpewPatchBaselineFrameFromExceptionHandler(uint8_t* oldReturnAddress, uint8_t* newReturnAddress,
                                            JSScript* script, jsbytecode* pc)
 {
     JitSpew(JitSpew_BaselineDebugModeOSR,
-            "Patch return %p -> %p on BaselineJS frame (%s:%u) from exception handler at %s",
+            "Patch return %p -> %p on BaselineJS frame (%s:%u:%u) from exception handler at %s",
             oldReturnAddress, newReturnAddress, script->filename(), script->lineno(),
-            CodeName[(JSOp)*pc]);
+            script->column(), CodeName[(JSOp)*pc]);
 }
 
 static void
 SpewPatchStubFrame(ICStub* oldStub, ICStub* newStub)
 {
     JitSpew(JitSpew_BaselineDebugModeOSR,
             "Patch   stub %p -> %p on BaselineStub frame (%s)",
             oldStub, newStub, newStub ? ICStub::KindString(newStub->kind()) : "exception handler");
@@ -678,18 +678,19 @@ RecompileBaselineScriptForDebugMode(JSCo
 {
     BaselineScript* oldBaselineScript = script->baselineScript();
 
     // If a script is on the stack multiple times, it may have already
     // been recompiled.
     if (oldBaselineScript->hasDebugInstrumentation() == observing)
         return true;
 
-    JitSpew(JitSpew_BaselineDebugModeOSR, "Recompiling (%s:%u) for %s",
-            script->filename(), script->lineno(), observing ? "DEBUGGING" : "NORMAL EXECUTION");
+    JitSpew(JitSpew_BaselineDebugModeOSR, "Recompiling (%s:%u:%u) for %s",
+            script->filename(), script->lineno(), script->column(), 
+            observing ? "DEBUGGING" : "NORMAL EXECUTION");
 
     AutoKeepTypeScripts keepTypes(cx);
     script->setBaselineScript(cx->runtime(), nullptr);
 
     MethodStatus status = BaselineCompile(cx, script, /* forceDebugMode = */ observing);
     if (status != Method_Compiled) {
         // We will only fail to recompile for debug mode due to OOM. Restore
         // the old baseline script in case something doesn't properly
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -65,19 +65,20 @@ FallbackICSpew(JSContext* cx, ICFallback
 
         char fmtbuf[100];
         va_list args;
         va_start(args, fmt);
         (void) VsprintfLiteral(fmtbuf, fmt, args);
         va_end(args);
 
         JitSpew(JitSpew_BaselineICFallback,
-                "Fallback hit for (%s:%u) (pc=%zu,line=%d,uses=%d,stubs=%zu): %s",
+                "Fallback hit for (%s:%u:%u) (pc=%zu,line=%d,uses=%d,stubs=%zu): %s",
                 script->filename(),
                 script->lineno(),
+                script->column(),
                 script->pcToOffset(pc),
                 PCToLineNumber(script, pc),
                 script->getWarmUpCount(),
                 stub->numOptimizedStubs(),
                 fmtbuf);
     }
 }
 
@@ -90,19 +91,20 @@ TypeFallbackICSpew(JSContext* cx, ICType
 
         char fmtbuf[100];
         va_list args;
         va_start(args, fmt);
         (void) VsprintfLiteral(fmtbuf, fmt, args);
         va_end(args);
 
         JitSpew(JitSpew_BaselineICFallback,
-                "Type monitor fallback hit for (%s:%u) (pc=%zu,line=%d,uses=%d,stubs=%d): %s",
+                "Type monitor fallback hit for (%s:%u:%u) (pc=%zu,line=%d,uses=%d,stubs=%d): %s",
                 script->filename(),
                 script->lineno(),
+                script->column(),
                 script->pcToOffset(pc),
                 PCToLineNumber(script, pc),
                 script->getWarmUpCount(),
                 (int) stub->numOptimizedMonitorStubs(),
                 fmtbuf);
     }
 }
 #endif // JS_JITSPEW
@@ -3333,19 +3335,19 @@ TryAttachCallStub(JSContext* cx, ICCall_
         }
 
         if (nativeWithJitEntry) {
             JitSpew(JitSpew_BaselineIC,
                     "  Generating Call_Scripted stub (native=%p with jit entry, cons=%s, spread=%s)",
                     fun->native(), constructing ? "yes" : "no", isSpread ? "yes" : "no");
         } else {
             JitSpew(JitSpew_BaselineIC,
-                    "  Generating Call_Scripted stub (fun=%p, %s:%u, cons=%s, spread=%s)",
+                    "  Generating Call_Scripted stub (fun=%p, %s:%u:%u, cons=%s, spread=%s)",
                     fun.get(), fun->nonLazyScript()->filename(), fun->nonLazyScript()->lineno(),
-                    constructing ? "yes" : "no", isSpread ? "yes" : "no");
+                    fun->nonLazyScript()->column(), constructing ? "yes" : "no", isSpread ? "yes" : "no");
         }
 
         bool isCrossRealm = cx->realm() != fun->realm();
         ICCallScriptedCompiler compiler(cx, typeMonitorFallback->firstMonitorStub(),
                                         fun, templateObject,
                                         constructing, isSpread, isCrossRealm,
                                         script->pcToOffset(pc));
         ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -10207,19 +10207,20 @@ CodeGenerator::generateWasm(wasm::FuncTy
     MOZ_ASSERT(safepoints_.size() == 0);
     MOZ_ASSERT(!scriptCounts_);
     return true;
 }
 
 bool
 CodeGenerator::generate()
 {
-    JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u",
+    JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
             gen->info().script()->filename(),
-            gen->info().script()->lineno());
+            gen->info().script()->lineno(),
+            gen->info().script()->column());
 
     // Initialize native code table with an entry to the start of
     // top-level script.
     InlineScriptTree* tree = gen->info().inlineScriptTree();
     jsbytecode* startPC = tree->script()->code();
     BytecodeSite* startSite = new(gen->alloc()) BytecodeSite(tree, startPC);
     if (!addNativeToBytecodeEntry(startSite))
         return false;
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -2027,19 +2027,19 @@ IonCompile(JSContext* cx, JSScript* scri
 
         return reason;
     }
 
     AssertBasicGraphCoherency(builder->graph());
 
     // If possible, compile the script off thread.
     if (options.offThreadCompilationAvailable()) {
-        JitSpew(JitSpew_IonSyncLogs, "Can't log script %s:%u"
+        JitSpew(JitSpew_IonSyncLogs, "Can't log script %s:%u:%u"
                 ". (Compiled on background thread.)",
-                builderScript->filename(), builderScript->lineno());
+                builderScript->filename(), builderScript->lineno(), builderScript->column());
 
         if (!CreateMIRRootList(*builder))
             return AbortReason::Alloc;
 
         AutoLockHelperThreadState lock;
         if (!StartOffThreadIonCompile(builder, lock)) {
             JitSpew(JitSpew_IonAbort, "Unable to start off-thread ion compilation.");
             builder->graphSpewer().endFunction();
@@ -2222,17 +2222,18 @@ Compile(JSContext* cx, HandleScript scri
         return Method_Skipped;
 
     if (script->isDebuggee() || (osrFrame && osrFrame->isDebuggee())) {
         TrackAndSpewIonAbort(cx, script, "debugging");
         return Method_Skipped;
     }
 
     if (!CanIonCompileScript(cx, script)) {
-        JitSpew(JitSpew_IonAbort, "Aborted compilation of %s:%u", script->filename(), script->lineno());
+        JitSpew(JitSpew_IonAbort, "Aborted compilation of %s:%u:%u", script->filename(),
+                script->lineno(), script->column());
         return Method_CantCompile;
     }
 
     bool recompile = false;
     OptimizationLevel optimizationLevel = GetOptimizationLevel(script, osrPc);
     if (optimizationLevel == OptimizationLevel::DontCompile)
         return Method_Skipped;
 
@@ -2492,18 +2493,19 @@ jit::IonCompileScriptForBaseline(JSConte
         // TODO: ASSERT that a ion-script-already-exists checker stub doesn't exist.
         // TODO: Clear all optimized stubs.
         // TODO: Add a ion-script-already-exists checker stub.
         return true;
     }
 
     // Ensure that Ion-compiled code is available.
     JitSpew(JitSpew_BaselineOSR,
-            "WarmUpCounter for %s:%u reached %d at pc %p, trying to switch to Ion!",
-            script->filename(), script->lineno(), (int) script->getWarmUpCount(), (void*) pc);
+            "WarmUpCounter for %s:%u:%u reached %d at pc %p, trying to switch to Ion!",
+            script->filename(), script->lineno(), script->column(), 
+            (int) script->getWarmUpCount(), (void*) pc);
 
     MethodStatus stat;
     if (isLoopEntry) {
         MOZ_ASSERT(LoopEntryCanIonOsr(pc));
         JitSpew(JitSpew_BaselineOSR, "  Compile at loop entry!");
         stat = BaselineCanEnterAtBranch(cx, script, frame, pc);
     } else if (frame->isFunctionFrame()) {
         JitSpew(JitSpew_BaselineOSR, "  Compile function from top for later entry!");
@@ -2596,20 +2598,20 @@ InvalidateActivation(FreeOp* fop, const 
             const char* type = "Unknown";
             if (frame.isIonJS())
                 type = "Optimized";
             else if (frame.isBaselineJS())
                 type = "Baseline";
             else if (frame.isBailoutJS())
                 type = "Bailing";
             JitSpew(JitSpew_IonInvalidate,
-                    "#%zu %s JS frame @ %p, %s:%u (fun: %p, script: %p, pc %p)",
+                    "#%zu %s JS frame @ %p, %s:%u:%u (fun: %p, script: %p, pc %p)",
                     frameno, type, frame.fp(), frame.script()->maybeForwardedFilename(),
-                    frame.script()->lineno(), frame.maybeCallee(), (JSScript*)frame.script(),
-                    frame.returnAddressToFp());
+                    frame.script()->lineno(), frame.script()->column(), frame.maybeCallee(), 
+                    (JSScript*)frame.script(), frame.returnAddressToFp());
             break;
           }
           case FrameType::BaselineStub:
             JitSpew(JitSpew_IonInvalidate, "#%zu baseline stub frame @ %p", frameno, frame.fp());
             break;
           case FrameType::Rectifier:
             JitSpew(JitSpew_IonInvalidate, "#%zu rectifier frame @ %p", frameno, frame.fp());
             break;
@@ -2753,18 +2755,19 @@ jit::Invalidate(TypeZone& types, FreeOp*
     for (const RecompileInfo& info : invalid) {
         if (cancelOffThread)
             CancelOffThreadIonCompile(info.script());
 
         IonScript* ionScript = info.maybeIonScriptToInvalidate(types);
         if (!ionScript)
             continue;
 
-        JitSpew(JitSpew_IonInvalidate, " Invalidate %s:%u, IonScript %p",
-                info.script()->filename(), info.script()->lineno(), ionScript);
+        JitSpew(JitSpew_IonInvalidate, " Invalidate %s:%u:%u, IonScript %p",
+                info.script()->filename(), info.script()->lineno(), 
+                info.script()->column(), ionScript);
 
         // Keep the ion script alive during the invalidation and flag this
         // ionScript as being invalidated.  This increment is removed by the
         // loop after the calls to InvalidateActivation.
         ionScript->incrementInvalidationCount();
         numInvalidations++;
     }
 
@@ -2845,17 +2848,18 @@ jit::Invalidate(JSContext* cx, JSScript*
         //      "<filename>:<lineno>"
 
         // Get the script filename, if any, and its length.
         const char* filename = script->filename();
         if (filename == nullptr)
             filename = "<unknown>";
 
         // Construct the descriptive string.
-        UniqueChars buf = JS_smprintf("Invalidate %s:%u", filename, script->lineno());
+        UniqueChars buf = JS_smprintf("Invalidate %s:%u:%u", filename, 
+                script->lineno(), script->column());
 
         // Ignore the event on allocation failure.
         if (buf) {
             cx->runtime()->geckoProfiler().markEvent(buf.get());
         }
     }
 
     // RecompileInfoVector has inline space for at least one element.
@@ -2881,18 +2885,18 @@ jit::FinishInvalidation(FreeOp* fop, JSS
     // true. In this case we have to wait until destroying it.
     if (!ion->invalidated())
         jit::IonScript::Destroy(fop, ion);
 }
 
 void
 jit::ForbidCompilation(JSContext* cx, JSScript* script)
 {
-    JitSpew(JitSpew_IonAbort, "Disabling Ion compilation of script %s:%u",
-            script->filename(), script->lineno());
+    JitSpew(JitSpew_IonAbort, "Disabling Ion compilation of script %s:%u:%u",
+            script->filename(), script->lineno(), script->column());
 
     CancelOffThreadIonCompile(script);
 
     if (script->hasIonScript())
         Invalidate(cx, script, false);
 
     script->setIonScript(cx->runtime(), ION_DISABLED_SCRIPT);
 }
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -321,18 +321,19 @@ IonBuilder::getPolyCallTargets(Temporary
 
     return Ok();
 }
 
 IonBuilder::InliningDecision
 IonBuilder::DontInline(JSScript* targetScript, const char* reason)
 {
     if (targetScript) {
-        JitSpew(JitSpew_Inlining, "Cannot inline %s:%u: %s",
-                targetScript->filename(), targetScript->lineno(), reason);
+        JitSpew(JitSpew_Inlining, "Cannot inline %s:%u:%u: %s",
+                targetScript->filename(), targetScript->lineno(),
+                targetScript->column(), reason);
     } else {
         JitSpew(JitSpew_Inlining, "Cannot inline: %s", reason);
     }
 
     return InliningDecision_DontInline;
 }
 
 /*
@@ -747,24 +748,24 @@ IonBuilder::build()
         script()->baselineScript()->resetMaxInliningDepth();
 
     MBasicBlock* entry;
     MOZ_TRY_VAR(entry, newBlock(info().firstStackSlot(), pc));
     MOZ_TRY(setCurrentAndSpecializePhis(entry));
 
 #ifdef JS_JITSPEW
     if (info().isAnalysis()) {
-        JitSpew(JitSpew_IonScripts, "Analyzing script %s:%u (%p) %s",
-                script()->filename(), script()->lineno(), (void*)script(),
-                AnalysisModeString(info().analysisMode()));
+        JitSpew(JitSpew_IonScripts, "Analyzing script %s:%u:%u (%p) %s",
+                script()->filename(), script()->lineno(), script()->column(),
+                (void*)script(), AnalysisModeString(info().analysisMode()));
     } else {
-        JitSpew(JitSpew_IonScripts, "%sompiling script %s:%u (%p) (warmup-counter=%" PRIu32 ", level=%s)",
+        JitSpew(JitSpew_IonScripts, "%sompiling script %s:%u:%u (%p) (warmup-counter=%" PRIu32 ", level=%s)",
                 (script()->hasIonScript() ? "Rec" : "C"),
-                script()->filename(), script()->lineno(), (void*)script(),
-                script()->getWarmUpCount(), OptimizationLevelString(optimizationInfo().level()));
+                script()->filename(), script()->lineno(), script()->column(),
+                (void*)script(), script()->getWarmUpCount(), OptimizationLevelString(optimizationInfo().level()));
     }
 #endif
 
     MOZ_TRY(initParameters());
     initLocals();
 
     // Initialize something for the env chain. We can bail out before the
     // start instruction, but the snapshot is encoded *at* the start
@@ -921,18 +922,18 @@ IonBuilder::processIterators()
 AbortReasonOr<Ok>
 IonBuilder::buildInline(IonBuilder* callerBuilder, MResumePoint* callerResumePoint,
                         CallInfo& callInfo)
 {
     inlineCallInfo_ = &callInfo;
 
     MOZ_TRY(init());
 
-    JitSpew(JitSpew_IonScripts, "Inlining script %s:%u (%p)",
-            script()->filename(), script()->lineno(), (void*)script());
+    JitSpew(JitSpew_IonScripts, "Inlining script %s:%u:%u (%p)",
+            script()->filename(), script()->lineno(), script()->column(), (void*)script());
 
     callerBuilder_ = callerBuilder;
     callerResumePoint_ = callerResumePoint;
 
     if (callerBuilder->failedBoundsCheck_)
         failedBoundsCheck_ = true;
 
     if (callerBuilder->failedShapeGuard_)
@@ -1218,18 +1219,18 @@ IonBuilder::initEnvironmentChain(MDefini
     // See: |InitFromBailout|
     current->setEnvironmentChain(env);
     return Ok();
 }
 
 void
 IonBuilder::initArgumentsObject()
 {
-    JitSpew(JitSpew_IonMIR, "%s:%u - Emitting code to initialize arguments object! block=%p",
-            script()->filename(), script()->lineno(), current);
+    JitSpew(JitSpew_IonMIR, "%s:%u:%u - Emitting code to initialize arguments object! block=%p",
+            script()->filename(), script()->lineno(), script()->column(), current);
     MOZ_ASSERT(info().needsArgsObj());
 
     bool mapped = script()->hasMappedArgsObj();
     ArgumentsObject* templateObj = script()->realm()->maybeArgumentsTemplateObject(mapped);
 
     MCreateArgumentsObject* argsObj =
         MCreateArgumentsObject::New(alloc(), current->environmentChain(), templateObj);
     current->add(argsObj);
@@ -1430,18 +1431,18 @@ GetOrCreateControlFlowGraph(TempAllocato
         return CFGState::Alloc;
 
     if (script->hasBaselineScript()) {
         MOZ_ASSERT(!script->baselineScript()->controlFlowGraph());
         script->baselineScript()->setControlFlowGraph(cfg);
     }
 
     if (JitSpewEnabled(JitSpew_CFG)) {
-        JitSpew(JitSpew_CFG, "Generating graph for %s:%u",
-                             script->filename(), script->lineno());
+        JitSpew(JitSpew_CFG, "Generating graph for %s:%u:%u",
+                             script->filename(), script->lineno(), script->column());
         Fprinter& print = JitSpewPrinter();
         cfg->dump(print, script);
     }
 
     *cfgOut = cfg;
     return CFGState::Success;
 }
 
@@ -4029,18 +4030,18 @@ IonBuilder::makeInliningDecision(JSObjec
     // Callee must have been called a few times to have somewhat stable
     // type information, except for definite properties analysis,
     // as the caller has not run yet.
     if (targetScript->getWarmUpCount() < optimizationInfo().inliningWarmUpThreshold() &&
         !targetScript->baselineScript()->ionCompiledOrInlined() &&
         info().analysisMode() != Analysis_DefiniteProperties)
     {
         trackOptimizationOutcome(TrackedOutcome::CantInlineNotHot);
-        JitSpew(JitSpew_Inlining, "Cannot inline %s:%u: callee is insufficiently hot.",
-                targetScript->filename(), targetScript->lineno());
+        JitSpew(JitSpew_Inlining, "Cannot inline %s:%u:%u: callee is insufficiently hot.",
+                targetScript->filename(), targetScript->lineno(), targetScript->column());
         return InliningDecision_WarmUpCountTooLow;
     }
 
     // Don't inline if the callee is known to inline a lot of code, to avoid
     // huge MIR graphs.
     uint32_t inlinedBytecodeLength = targetScript->baselineScript()->inlinedBytecodeLength();
     if (inlinedBytecodeLength > optimizationInfo().inlineMaxCalleeInlinedBytecodeLength()) {
         trackOptimizationOutcome(TrackedOutcome::CantInlineBigCalleeInlinedBytecodeLength);
--- a/js/src/jit/IonIC.cpp
+++ b/js/src/jit/IonIC.cpp
@@ -157,18 +157,18 @@ IonGetPropertyIC::update(JSContext* cx, 
 
     if (!attached && ic->idempotent()) {
         // Invalidate the cache if the property was not found, or was found on
         // a non-native object. This ensures:
         // 1) The property read has no observable side-effects.
         // 2) There's no need to dynamically monitor the return type. This would
         //    be complicated since (due to GVN) there can be multiple pc's
         //    associated with a single idempotent cache.
-        JitSpew(JitSpew_IonIC, "Invalidating from idempotent cache %s:%u",
-                outerScript->filename(), outerScript->lineno());
+        JitSpew(JitSpew_IonIC, "Invalidating from idempotent cache %s:%u:%u",
+                outerScript->filename(), outerScript->lineno(), outerScript->column());
 
         outerScript->setInvalidatedIdempotentCache();
 
         // Do not re-invalidate if the lookup already caused invalidation.
         if (outerScript->hasIonScript())
             Invalidate(cx, outerScript);
 
         // We will redo the potentially effectful lookup in Baseline.
--- a/js/src/jit/JitcodeMap.cpp
+++ b/js/src/jit/JitcodeMap.cpp
@@ -1531,24 +1531,25 @@ JitcodeIonTable::WriteIonTable(CompactBu
                                const NativeToBytecode* end,
                                uint32_t* tableOffsetOut, uint32_t* numRegionsOut)
 {
     MOZ_ASSERT(tableOffsetOut != nullptr);
     MOZ_ASSERT(numRegionsOut != nullptr);
     MOZ_ASSERT(writer.length() == 0);
     MOZ_ASSERT(scriptListSize > 0);
 
-    JitSpew(JitSpew_Profiling, "Writing native to bytecode map for %s:%u (%zu entries)",
-            scriptList[0]->filename(), scriptList[0]->lineno(),
+    JitSpew(JitSpew_Profiling, "Writing native to bytecode map for %s:%u:%u (%zu entries)",
+            scriptList[0]->filename(), scriptList[0]->lineno(), scriptList[0]->column(),
             mozilla::PointerRangeSize(start, end));
 
     JitSpew(JitSpew_Profiling, "  ScriptList of size %d", int(scriptListSize));
     for (uint32_t i = 0; i < scriptListSize; i++) {
-        JitSpew(JitSpew_Profiling, "  Script %d - %s:%u",
-                int(i), scriptList[i]->filename(), scriptList[i]->lineno());
+        JitSpew(JitSpew_Profiling, "  Script %d - %s:%u:%u",
+                int(i), scriptList[i]->filename(),
+                scriptList[i]->lineno(), scriptList[i]->column());
     }
 
     // Write out runs first.  Keep a vector tracking the positive offsets from payload
     // start to the run.
     const NativeToBytecode* curEntry = start;
     js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;
 
     while (curEntry != end) {
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -277,18 +277,19 @@ CodeGeneratorShared::addNativeToBytecode
     return true;
 }
 
 void
 CodeGeneratorShared::dumpNativeToBytecodeEntries()
 {
 #ifdef JS_JITSPEW
     InlineScriptTree* topTree = gen->info().inlineScriptTree();
-    JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%u\n",
-                 topTree->script()->filename(), topTree->script()->lineno());
+    JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%u:%u\n",
+                 topTree->script()->filename(), topTree->script()->lineno(),
+                 topTree->script()->column());
     for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++)
         dumpNativeToBytecodeEntry(i);
 #endif
 }
 
 void
 CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx)
 {
@@ -300,27 +301,28 @@ CodeGeneratorShared::dumpNativeToBytecod
     unsigned nativeDelta = 0;
     unsigned pcDelta = 0;
     if (idx + 1 < nativeToBytecodeList_.length()) {
         NativeToBytecode* nextRef = &ref + 1;
         nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
         if (nextRef->tree == ref.tree)
             pcDelta = nextRef->pc - ref.pc;
     }
-    JitSpewStart(JitSpew_Profiling, "    %08zx [+%-6d] => %-6ld [%-4d] {%-10s} (%s:%u",
+    JitSpewStart(JitSpew_Profiling, "    %08zx [+%-6d] => %-6ld [%-4d] {%-10s} (%s:%u:%u",
                  ref.nativeOffset.offset(),
                  nativeDelta,
                  (long) (ref.pc - script->code()),
                  pcDelta,
                  CodeName[JSOp(*ref.pc)],
-                 script->filename(), script->lineno());
+                 script->filename(), script->lineno(), script->column());
 
     for (tree = tree->caller(); tree; tree = tree->caller()) {
-        JitSpewCont(JitSpew_Profiling, " <= %s:%u", tree->script()->filename(),
-                                                    tree->script()->lineno());
+        JitSpewCont(JitSpew_Profiling, " <= %s:%u:%u", tree->script()->filename(),
+                                                       tree->script()->lineno(),
+                                                       tree->script()->column());
     }
     JitSpewCont(JitSpew_Profiling, ")");
     JitSpewFin(JitSpew_Profiling);
 #endif
 }
 
 bool
 CodeGeneratorShared::addTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
--- a/layout/base/tests/marionette/test_accessiblecaret_selection_mode.py
+++ b/layout/base/tests/marionette/test_accessiblecaret_selection_mode.py
@@ -490,43 +490,16 @@ class AccessibleCaretSelectionModeTestCa
         (caret1_x, caret1_y), (caret2_x, caret2_y) = sel.carets_location()
         self.actions.flick(body, caret2_x, caret2_y, start_caret_x, start_caret_y).perform()
 
         # Drag end caret back to the target word
         self.actions.flick(body, start_caret_x, start_caret_y, caret2_x, caret2_y).perform()
 
         self.assertEqual(self.to_unix_line_ending(sel.selected_content), 'select')
 
-    @skip_if_not_rotatable
-    def test_caret_position_after_changing_orientation_of_device(self):
-        '''Bug 1094072
-        If positions of carets are updated correctly, they should be draggable.
-        '''
-        self.open_test_html(self._longtext_html)
-        body = self.marionette.find_element(By.ID, 'bd')
-        longtext = self.marionette.find_element(By.ID, 'longtext')
-
-        # Select word in portrait mode, then change to landscape mode
-        self.marionette.set_orientation('portrait')
-        self.long_press_on_word(longtext, 12)
-        sel = SelectionManager(body)
-        (p_start_caret_x, p_start_caret_y), (p_end_caret_x, p_end_caret_y) = sel.carets_location()
-        self.marionette.set_orientation('landscape')
-        (l_start_caret_x, l_start_caret_y), (l_end_caret_x, l_end_caret_y) = sel.carets_location()
-
-        # Drag end caret to the start caret to change the selected content
-        self.actions.flick(body, l_end_caret_x, l_end_caret_y,
-                           l_start_caret_x, l_start_caret_y).perform()
-
-        # Change orientation back to portrait mode to prevent affecting
-        # other tests
-        self.marionette.set_orientation('portrait')
-
-        self.assertEqual(self.to_unix_line_ending(sel.selected_content), 'o')
-
     def test_select_word_inside_an_iframe(self):
         '''Bug 1088552
         The scroll offset in iframe should be taken into consideration properly.
         In this test, we scroll content in the iframe to the bottom to cause a
         huge offset. If we use the right coordinate system, selection should
         work. Otherwise, it would be hard to trigger select word.
         '''
         self.open_test_html(self._iframe_html)
--- a/toolkit/components/telemetry/Histograms.json
+++ b/toolkit/components/telemetry/Histograms.json
@@ -533,16 +533,26 @@
     "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
     "expires_in_version": "never",
     "kind": "linear",
     "high": 100,
     "n_buckets": 50,
     "bug_numbers": [1372042],
     "description": "Percent of the cycle collector's forget skippable done during idle time"
   },
+  "FORGET_SKIPPABLE_FREQUENCY": {
+    "record_in_processes": ["main", "content"],
+    "alert_emails": ["smaug@mozilla.com", "dev-telemetry-gc-alerts@mozilla.org"],
+    "expires_in_version": "never",
+    "kind": "linear",
+    "high": 500,
+    "n_buckets": 100,
+    "bug_numbers": [1487271],
+    "description": "Number of forget skippables that occurred during a minute"
+  },
   "FULLSCREEN_TRANSITION_BLACK_MS": {
     "record_in_processes": ["main", "content"],
     "alert_emails": ["xquan@mozilla.com"],
     "expires_in_version": "never",
     "kind": "exponential",
     "low": 100,
     "high": 5000,
     "n_buckets": 50,
--- a/toolkit/content/widgets/tabbox.js
+++ b/toolkit/content/widgets/tabbox.js
@@ -9,37 +9,23 @@
 class MozTabbox extends MozXULElement {
   constructor() {
     super();
     this._handleMetaAltArrows = /Mac/.test(navigator.platform);
     this.disconnectedCallback = this.disconnectedCallback.bind(this);
   }
 
   connectedCallback() {
-    switch (this.getAttribute("eventnode")) {
-      case "parent":
-        this._eventNode = this.parentNode;
-        break;
-      case "window":
-        this._eventNode = window;
-        break;
-      case "document":
-        this._eventNode = document;
-        break;
-      default:
-        this._eventNode = this;
-    }
-
-    Services.els.addSystemEventListener(this._eventNode, "keydown", this, false);
+    Services.els.addSystemEventListener(document, "keydown", this, false);
     window.addEventListener("unload", this.disconnectedCallback, { once: true });
   }
 
   disconnectedCallback() {
     window.removeEventListener("unload", this.disconnectedCallback);
-    Services.els.removeSystemEventListener(this._eventNode, "keydown", this, false);
+    Services.els.removeSystemEventListener(document, "keydown", this, false);
   }
 
   set handleCtrlTab(val) {
     this.setAttribute("handleCtrlTab", val);
     return val;
   }
 
   get handleCtrlTab() {
@@ -108,29 +94,16 @@ class MozTabbox extends MozXULElement {
     return val;
   }
 
   get selectedPanel() {
     var tabpanels = this.tabpanels;
     return tabpanels && tabpanels.selectedPanel;
   }
 
-  set eventNode(val) {
-    if (val != this._eventNode) {
-      Services.els.addSystemEventListener(val, "keydown", this, false);
-      Services.els.removeSystemEventListener(this._eventNode, "keydown", this, false);
-      this._eventNode = val;
-    }
-    return val;
-  }
-
-  get eventNode() {
-    return this._eventNode;
-  }
-
   handleEvent(event) {
     if (!event.isTrusted) {
       // Don't let untrusted events mess with tabs.
       return;
     }
 
     // Don't check if the event was already consumed because tab
     // navigation should always work for better user experience.