Merge mozilla-central to b2g-inbound
authorCarsten "Tomcat" Book <cbook@mozilla.com>
Wed, 18 Jun 2014 14:45:28 +0200
changeset 189353 a6403f6c02135600639c2f5c86977b3ed7441aea
parent 189352 4fe79d16c1807f79db6bf599b8fab94024da3b6e (current diff)
parent 189292 1cea544c74c5f95cd32f0444b08fe285fde57a44 (diff)
child 189354 7cce90463c0a8e2c2e5a50c527d85f7c074fa613
push id1
push userroot
push dateMon, 20 Oct 2014 17:29:22 +0000
milestone33.0a1
Merge mozilla-central to b2g-inbound
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -6,16 +6,17 @@ dnl
 builtin(include, build/autoconf/hotfixes.m4)dnl
 builtin(include, build/autoconf/acwinpaths.m4)dnl
 builtin(include, build/autoconf/hooks.m4)dnl
 builtin(include, build/autoconf/config.status.m4)dnl
 builtin(include, build/autoconf/toolchain.m4)dnl
 builtin(include, build/autoconf/ccache.m4)dnl
 builtin(include, build/autoconf/wrapper.m4)dnl
 builtin(include, build/autoconf/nspr.m4)dnl
+builtin(include, build/autoconf/nspr-build.m4)dnl
 builtin(include, build/autoconf/nss.m4)dnl
 builtin(include, build/autoconf/pkg.m4)dnl
 builtin(include, build/autoconf/codeset.m4)dnl
 builtin(include, build/autoconf/altoptions.m4)dnl
 builtin(include, build/autoconf/mozprog.m4)dnl
 builtin(include, build/autoconf/mozheader.m4)dnl
 builtin(include, build/autoconf/mozcommonheader.m4)dnl
 builtin(include, build/autoconf/lto.m4)dnl
--- a/browser/base/content/newtab/search.js
+++ b/browser/base/content/newtab/search.js
@@ -153,17 +153,20 @@ let gSearch = {
       this._send("SetCurrentEngine", engine.name);
       panel.hidePopup();
       this._nodes.text.focus();
     });
 
     let image = document.createElementNS(XUL_NAMESPACE, "image");
     if (engine.iconBuffer) {
       let blob = new Blob([engine.iconBuffer]);
-      image.setAttribute("src", URL.createObjectURL(blob));
+      let size = Math.round(16 * window.devicePixelRatio);
+      let sizeStr = size + "," + size;
+      let uri = URL.createObjectURL(blob) + "#-moz-resolution=" + sizeStr;
+      image.setAttribute("src", uri);
     }
     box.appendChild(image);
 
     let label = document.createElementNS(XUL_NAMESPACE, "label");
     label.setAttribute("value", engine.name);
     box.appendChild(label);
 
     return box;
new file mode 100644
--- /dev/null
+++ b/build/autoconf/nspr-build.m4
@@ -0,0 +1,269 @@
+dnl This Source Code Form is subject to the terms of the Mozilla Public
+dnl License, v. 2.0. If a copy of the MPL was not distributed with this
+dnl file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+AC_DEFUN([MOZ_CONFIG_NSPR], [
+
+ifelse([$1],,define(CONFIGURING_JS,yes))
+
+dnl Possible ways this can be called:
+dnl   from toplevel configure:
+dnl     JS_STANDALONE=  BUILDING_JS=
+dnl   from js/src/configure invoked by toplevel configure:
+dnl     JS_STANDALONE=  BUILDING_JS=1
+dnl   from standalone js/src/configure:
+dnl     JS_STANDALONE=1 BUILDING_JS=1
+
+dnl ========================================================
+dnl = Find the right NSPR to use.
+dnl ========================================================
+MOZ_ARG_WITH_STRING(nspr-cflags,
+[  --with-nspr-cflags=FLAGS
+                          Pass FLAGS to CC when building code that uses NSPR.
+                          Use this when there's no accurate nspr-config
+                          script available.  This is the case when building
+                          SpiderMonkey as part of the Mozilla tree: the
+                          top-level configure script computes NSPR flags
+                          that accommodate the quirks of that environment.],
+    NSPR_CFLAGS=$withval)
+MOZ_ARG_WITH_STRING(nspr-libs,
+[  --with-nspr-libs=LIBS   Pass LIBS to LD when linking code that uses NSPR.
+                          See --with-nspr-cflags for more details.],
+    NSPR_LIBS=$withval)
+
+ifdef([CONFIGURING_JS],[
+    MOZ_ARG_ENABLE_BOOL(nspr-build,
+[  --enable-nspr-build     Configure and build NSPR from source tree],
+        MOZ_BUILD_NSPR=1,
+        MOZ_BUILD_NSPR=)
+])
+
+if test -z "$BUILDING_JS" || test -n "$JS_STANDALONE"; then
+  _IS_OUTER_CONFIGURE=1
+fi
+
+MOZ_ARG_WITH_BOOL(system-nspr,
+[  --with-system-nspr      Use an NSPR that is already built and installed.
+                          Use the 'nspr-config' script in the current path,
+                          or look for the script in the directories given with
+                          --with-nspr-exec-prefix or --with-nspr-prefix.
+                          (Those flags are only checked if you specify
+                          --with-system-nspr.)],
+    _USE_SYSTEM_NSPR=1 )
+
+if test -z "$BUILDING_JS"; then
+    JS_THREADSAFE=1
+fi
+
+JS_POSIX_NSPR=unset
+ifdef([CONFIGURING_JS],[
+    if test -n "$JS_STANDALONE"; then
+      case "$target" in
+        *linux*|*darwin*|*dragonfly*|*freebsd*|*netbsd*|*openbsd*)
+          if test -z "$_HAS_NSPR" && test "$JS_THREADSAFE"; then
+            JS_POSIX_NSPR_DEFAULT=1
+          fi
+          ;;
+      esac
+    fi
+
+    MOZ_ARG_ENABLE_BOOL(posix-nspr-emulation,
+[  --enable-posix-nspr-emulation
+                          Enable emulation of NSPR for POSIX systems],
+    JS_POSIX_NSPR=1,
+    JS_POSIX_NSPR=)
+])
+
+dnl Pass at most one of
+dnl   --with-system-nspr
+dnl   --with-nspr-cflags/libs
+dnl   --enable-nspr-build
+dnl   --enable-posix-nspr-emulation
+
+AC_MSG_CHECKING([NSPR selection])
+nspr_opts=
+which_nspr=default
+if test -n "$_USE_SYSTEM_NSPR"; then
+    nspr_opts="x$nspr_opts"
+    which_nspr="system"
+fi
+if test -n "$NSPR_CFLAGS" -o -n "$NSPR_LIBS"; then
+    nspr_opts="x$nspr_opts"
+    which_nspr="command-line"
+fi
+if test -n "$MOZ_BUILD_NSPR"; then
+    nspr_opts="x$nspr_opts"
+    which_nspr="source-tree"
+fi
+if test "$JS_POSIX_NSPR" = unset; then
+    JS_POSIX_NSPR=
+else
+    nspr_opts="x$nspr_opts"
+    which_nspr="posix-wrapper"
+fi
+
+if test -z "$nspr_opts"; then
+    if test -z "$BUILDING_JS"; then
+      dnl Toplevel configure defaults to using nsprpub from the source tree
+      MOZ_BUILD_NSPR=1
+      which_nspr="source-tree"
+    else
+      dnl JS configure defaults to emulated NSPR if available, falling back
+      dnl to nsprpub.
+      if test -n "$JS_THREADSAFE"; then
+          JS_POSIX_NSPR="$JS_POSIX_NSPR_DEFAULT"
+          if test -z "$JS_POSIX_NSPR"; then
+             MOZ_BUILD_NSPR=1
+             which_nspr="source-tree"
+          else
+             which_nspr="posix-wrapper"
+          fi
+      else
+          which_nspr="none"
+      fi
+   fi
+fi
+
+if test -z "$nspr_opts" || test "$nspr_opts" = x; then
+    AC_MSG_RESULT($which_nspr)
+else
+    AC_MSG_ERROR([only one way of using NSPR may be selected. See 'configure --help'.])
+fi
+
+AC_SUBST(MOZ_BUILD_NSPR)
+
+if test -n "$BUILDING_JS"; then
+  if test "$JS_POSIX_NSPR" = 1; then
+    AC_DEFINE(JS_POSIX_NSPR)
+  fi
+  AC_SUBST(JS_POSIX_NSPR)
+fi
+
+# A (sub)configure invoked by the toplevel configure will always receive
+# --with-nspr-libs on the command line. It will never need to figure out
+# anything itself.
+if test -n "$_IS_OUTER_CONFIGURE"; then
+
+if test -n "$_USE_SYSTEM_NSPR"; then
+    AM_PATH_NSPR($NSPR_MINVER, [MOZ_NATIVE_NSPR=1], [AC_MSG_ERROR([you do not have NSPR installed or your version is older than $NSPR_MINVER.])])
+fi
+
+if test -n "$MOZ_NATIVE_NSPR" -o -n "$NSPR_CFLAGS" -o -n "$NSPR_LIBS"; then
+    _SAVE_CFLAGS=$CFLAGS
+    CFLAGS="$CFLAGS $NSPR_CFLAGS"
+    AC_TRY_COMPILE([#include "prtypes.h"],
+                [#ifndef PR_STATIC_ASSERT
+                 #error PR_STATIC_ASSERT not defined or requires including prtypes.h
+                 #endif],
+                ,
+                AC_MSG_ERROR([system NSPR does not support PR_STATIC_ASSERT or including prtypes.h does not provide it]))
+    AC_TRY_COMPILE([#include "prtypes.h"],
+                [#ifndef PR_UINT64
+                 #error PR_UINT64 not defined or requires including prtypes.h
+                 #endif],
+                ,
+                AC_MSG_ERROR([system NSPR does not support PR_UINT64 or including prtypes.h does not provide it]))
+    CFLAGS=$_SAVE_CFLAGS
+elif test -z "$JS_POSIX_NSPR" -a -n "$JS_THREADSAFE"; then
+    if test -z "$LIBXUL_SDK"; then
+        NSPR_CFLAGS="-I${LIBXUL_DIST}/include/nspr"
+        if test -n "$GNU_CC"; then
+            NSPR_LIBS="-L${LIBXUL_DIST}/lib -lnspr${NSPR_VERSION} -lplc${NSPR_VERSION} -lplds${NSPR_VERSION}"
+        else
+            NSPR_LIBS="${LIBXUL_DIST}/lib/nspr${NSPR_VERSION}.lib ${LIBXUL_DIST}/lib/plc${NSPR_VERSION}.lib ${LIBXUL_DIST}/lib/plds${NSPR_VERSION}.lib "
+        fi
+    else
+        NSPR_CFLAGS=`"${LIBXUL_DIST}"/sdk/bin/nspr-config --prefix="${LIBXUL_DIST}" --includedir="${LIBXUL_DIST}/include/nspr" --cflags`
+        NSPR_LIBS=`"${LIBXUL_DIST}"/sdk/bin/nspr-config --prefix="${LIBXUL_DIST}" --libdir="${LIBXUL_DIST}"/lib --libs`
+    fi
+fi
+
+AC_SUBST(NSPR_CFLAGS)
+AC_SUBST(NSPR_LIBS)
+
+NSPR_PKGCONF_CHECK="nspr"
+if test -n "$MOZ_NATIVE_NSPR"; then
+    # piggy back on $MOZ_NATIVE_NSPR to set a variable for the nspr check for js.pc
+    NSPR_PKGCONF_CHECK="nspr >= $NSPR_MINVER"
+
+    _SAVE_CFLAGS=$CFLAGS
+    CFLAGS="$CFLAGS $NSPR_CFLAGS"
+    AC_TRY_COMPILE([#include "prlog.h"],
+                [#ifndef PR_STATIC_ASSERT
+                 #error PR_STATIC_ASSERT not defined
+                 #endif],
+                ,
+                AC_MSG_ERROR([system NSPR does not support PR_STATIC_ASSERT]))
+    CFLAGS=$_SAVE_CFLAGS
+fi
+AC_SUBST(NSPR_PKGCONF_CHECK)
+
+fi # _IS_OUTER_CONFIGURE
+
+])
+
+AC_DEFUN([MOZ_SUBCONFIGURE_NSPR], [
+
+if test -z "$MOZ_NATIVE_NSPR"; then
+    ac_configure_args="$_SUBDIR_CONFIG_ARGS --with-dist-prefix=$MOZ_BUILD_ROOT/dist --with-mozilla"
+    if test -z "$MOZ_DEBUG"; then
+        ac_configure_args="$ac_configure_args --disable-debug"
+    else
+        ac_configure_args="$ac_configure_args --enable-debug"
+    fi
+    if test "$MOZ_OPTIMIZE" = "1"; then
+        ac_configure_args="$ac_configure_args --enable-optimize"
+    elif test -z "$MOZ_OPTIMIZE"; then
+        ac_configure_args="$ac_configure_args --disable-optimize"
+    fi
+    if test -n "$HAVE_64BIT_OS"; then
+        ac_configure_args="$ac_configure_args --enable-64bit"
+    fi
+    if test -n "$USE_ARM_KUSER"; then
+        ac_configure_args="$ac_configure_args --with-arm-kuser"
+    fi
+    # A configure script generated by autoconf 2.68 does not allow the cached
+    # values of "precious" variables such as CFLAGS and LDFLAGS to differ from
+    # the values passed to the configure script. Since we modify CFLAGS and
+    # LDFLAGS before passing them to NSPR's configure script, we cannot share
+    # config.cache with NSPR. As a result, we cannot pass AS, CC, CXX, etc. to
+    # NSPR via a shared config.cache file and must pass them to NSPR on the
+    # configure command line.
+    for var in AS CC CXX CPP LD AR RANLIB STRIP; do
+        ac_configure_args="$ac_configure_args $var='`eval echo \\${${var}}`'"
+    done
+    # A configure script generated by autoconf 2.68 warns if --host is
+    # specified but --build isn't. So we always pass --build to NSPR's
+    # configure script.
+    ac_configure_args="$ac_configure_args --build=$build"
+    ac_configure_args="$ac_configure_args $NSPR_CONFIGURE_ARGS"
+
+    # Save these, so we can mess with them for the subconfigure ..
+    _SAVE_CFLAGS="$CFLAGS"
+    _SAVE_CPPFLAGS="$CPPFLAGS"
+    _SAVE_LDFLAGS="$LDFLAGS"
+
+    if test -n "$MOZ_LINKER" -a "$ac_cv_func_dladdr" = no ; then
+      # dladdr is supported by the new linker, even when the system linker doesn't
+      # support it. Trick nspr into using dladdr when it's not supported.
+      export CPPFLAGS="-include $_topsrcdir/mozglue/linker/dladdr.h $CPPFLAGS"
+    fi
+    export LDFLAGS="$LDFLAGS $NSPR_LDFLAGS"
+    export CFLAGS="$CFLAGS $MOZ_FRAMEPTR_FLAGS"
+
+    # Use a separate cache file for NSPR since it uses autoconf 2.68.
+    _save_cache_file="$cache_file"
+    cache_file=$_objdir/nsprpub/config.cache
+
+    AC_OUTPUT_SUBDIRS(nsprpub)
+
+    # .. and restore them
+    cache_file="$_save_cache_file"
+    CFLAGS="$_SAVE_CFLAGS"
+    CPPFLAGS="$_SAVE_CPPFLAGS"
+    LDFLAGS="$_SAVE_LDFLAGS"
+
+    ac_configure_args="$_SUBDIR_CONFIG_ARGS"
+fi
+
+])
--- a/build/mobile/robocop/AndroidManifest.xml.in
+++ b/build/mobile/robocop/AndroidManifest.xml.in
@@ -18,26 +18,28 @@
         <!-- Fake handlers to ensure that we have some share intents to show in our share handler list -->
         <activity android:name="org.mozilla.gecko.RobocopShare1"
                   android:label="Robocop fake activity">
 
             <intent-filter android:label="Fake robocop share handler 1">
                 <action android:name="android.intent.action.SEND" />
                 <category android:name="android.intent.category.DEFAULT" />
                 <data android:mimeType="text/*" />
+                <data android:mimeType="image/*" />
             </intent-filter>
 
         </activity>
 
         <activity android:name="org.mozilla.gecko.RobocopShare2"
                   android:label="Robocop fake activity 2">
 
             <intent-filter android:label="Fake robocop share handler 2">
                 <action android:name="android.intent.action.SEND" />
                 <category android:name="android.intent.category.DEFAULT" />
                 <data android:mimeType="text/*" />
+                <data android:mimeType="image/*" />
             </intent-filter>
 
         </activity>
 
     </application>
 
 </manifest>
--- a/configure.in
+++ b/configure.in
@@ -48,16 +48,17 @@ dnl ====================================
 _SUBDIR_HOST_LDFLAGS="$HOST_LDFLAGS"
 _SUBDIR_CONFIG_ARGS="$ac_configure_args"
 
 dnl Set the version number of the libs included with mozilla
 dnl ========================================================
 MOZJPEG=62
 MOZPNG=10610
 NSPR_VERSION=4
+NSPR_MINVER=4.10.3
 NSS_VERSION=3
 
 dnl Set the minimum version of toolkit libs used by mozilla
 dnl ========================================================
 GLIB_VERSION=1.2.0
 PERL_VERSION=5.006
 CAIRO_VERSION=1.10
 PANGO_VERSION=1.22.0
@@ -126,17 +127,17 @@ if test "$_conflict_files"; then
   *   To clean up the source tree:
   *     1. cd $_topsrcdir
   *     2. gmake distclean
   ***
 EOF
   exit 1
   break
 fi
-MOZ_BUILD_ROOT=`pwd`
+MOZ_BUILD_ROOT=`pwd -W 2>/dev/null || pwd`
 
 MOZ_PYTHON
 
 MOZ_DEFAULT_COMPILER
 
 COMPILE_ENVIRONMENT=1
 MOZ_ARG_DISABLE_BOOL(compile-environment,
 [  --disable-compile-environment
@@ -2195,17 +2196,16 @@ ia64*-hpux*)
     TARGET_NSPR_MDCPUCFG='\"md/_win95.cfg\"'
 
     dnl set NO_X11 defines here as the general check is skipped on win32
     no_x=yes
     AC_DEFINE(NO_X11)
 
     case "$host" in
     *-mingw*)
-        MOZ_BUILD_ROOT=`cd $MOZ_BUILD_ROOT && pwd -W`
         if test -n "$L10NBASEDIR"; then
             L10NBASEDIR=`cd $L10NBASEDIR && pwd -W`
         fi
         ;;
     esac
 
     case "$host" in
     *-mingw*)
@@ -3438,57 +3438,17 @@ fi
 AC_SUBST(LIBXUL_DIST)
 
 SYSTEM_LIBXUL=
 
 MOZ_ARG_WITH_BOOL(system-libxul,
 [  --with-system-libxul    Use system installed libxul SDK],
     SYSTEM_LIBXUL=1)
 
-dnl ========================================================
-dnl = If NSPR was not detected in the system,
-dnl = use the one in the source tree (mozilla/nsprpub)
-dnl ========================================================
-MOZ_ARG_WITH_BOOL(system-nspr,
-[  --with-system-nspr      Use system installed NSPR],
-    _USE_SYSTEM_NSPR=1 )
-
-if test -n "$_USE_SYSTEM_NSPR"; then
-    AM_PATH_NSPR(4.10.6, [MOZ_NATIVE_NSPR=1], [AC_MSG_ERROR([your don't have NSPR installed or your version is too old])])
-fi
-
-if test -n "$MOZ_NATIVE_NSPR"; then
-    _SAVE_CFLAGS=$CFLAGS
-    CFLAGS="$CFLAGS $NSPR_CFLAGS"
-    AC_TRY_COMPILE([#include "prtypes.h"],
-                [#ifndef PR_STATIC_ASSERT
-                 #error PR_STATIC_ASSERT not defined or requires including prtypes.h
-                 #endif],
-                [MOZ_NATIVE_NSPR=1],
-                AC_MSG_ERROR([system NSPR does not support PR_STATIC_ASSERT or including prtypes.h does not provide it]))
-    AC_TRY_COMPILE([#include "prtypes.h"],
-                [#ifndef PR_UINT64
-                 #error PR_UINT64 not defined or requires including prtypes.h
-                 #endif],
-                [MOZ_NATIVE_NSPR=1],
-                AC_MSG_ERROR([system NSPR does not support PR_UINT64 or including prtypes.h does not provide it]))
-    CFLAGS=$_SAVE_CFLAGS
-else
-    if test -z "$LIBXUL_SDK"; then
-        NSPR_CFLAGS="-I${LIBXUL_DIST}/include/nspr"
-        if test -n "$GNU_CC"; then
-            NSPR_LIBS="-L${LIBXUL_DIST}/lib -lnspr${NSPR_VERSION} -lplc${NSPR_VERSION} -lplds${NSPR_VERSION}"
-        else
-            NSPR_LIBS="${LIBXUL_DIST}/lib/nspr${NSPR_VERSION}.lib ${LIBXUL_DIST}/lib/plc${NSPR_VERSION}.lib ${LIBXUL_DIST}/lib/plds${NSPR_VERSION}.lib "
-        fi
-    else
-        NSPR_CFLAGS=`"${LIBXUL_DIST}"/sdk/bin/nspr-config --prefix="${LIBXUL_DIST}" --includedir="${LIBXUL_DIST}/include/nspr" --cflags`
-        NSPR_LIBS=`"${LIBXUL_DIST}"/sdk/bin/nspr-config --prefix="${LIBXUL_DIST}" --libdir="${LIBXUL_DIST}"/lib --libs`
-    fi
-fi
+MOZ_CONFIG_NSPR()
 
 dnl set GRE_MILESTONE
 dnl ========================================================
 if test -n "$LIBXUL_SDK"; then
     GRE_MILESTONE=`$PYTHON "$_topsrcdir"/config/printconfigsetting.py "$LIBXUL_DIST"/bin/platform.ini Build Milestone`
 else
     GRE_MILESTONE=`tail -n 1 "$_topsrcdir"/config/milestone.txt 2>/dev/null || tail -1 "$_topsrcdir"/config/milestone.txt`
 fi
@@ -9171,80 +9131,20 @@ if test "$COMPILE_ENVIRONMENT" -a -z "$L
 
 export WRAP_LDFLAGS
 
 if test -n "$_WRAP_MALLOC"; then
     # Avoid doubling wrap malloc arguments
     _SUBDIR_CONFIG_ARGS="`echo $_SUBDIR_CONFIG_ARGS | sed -e 's/--enable-wrap-malloc *//'`"
 fi
 
-if test -z "$MOZ_NATIVE_NSPR"; then
-    ac_configure_args="$_SUBDIR_CONFIG_ARGS --with-dist-prefix=$MOZ_BUILD_ROOT/dist --with-mozilla"
-    if test -z "$MOZ_DEBUG"; then
-        ac_configure_args="$ac_configure_args --disable-debug"
-    else
-        ac_configure_args="$ac_configure_args --enable-debug"
+MOZ_SUBCONFIGURE_NSPR()
         if test -n "$MOZ_NO_DEBUG_RTL"; then
             ac_configure_args="$ac_configure_args --disable-debug-rtl"
         fi
-    fi
-    if test "$MOZ_OPTIMIZE" = "1"; then
-        ac_configure_args="$ac_configure_args --enable-optimize"
-    elif test -z "$MOZ_OPTIMIZE"; then
-        ac_configure_args="$ac_configure_args --disable-optimize"
-    fi
-    if test -n "$HAVE_64BIT_OS"; then
-        ac_configure_args="$ac_configure_args --enable-64bit"
-    fi
-    if test -n "$USE_ARM_KUSER"; then
-        ac_configure_args="$ac_configure_args --with-arm-kuser"
-    fi
-    # A configure script generated by autoconf 2.68 does not allow the cached
-    # values of "precious" variables such as CFLAGS and LDFLAGS to differ from
-    # the values passed to the configure script. Since we modify CFLAGS and
-    # LDFLAGS before passing them to NSPR's configure script, we cannot share
-    # config.cache with NSPR. As a result, we cannot pass AS, CC, CXX, etc. to
-    # NSPR via a shared config.cache file and must pass them to NSPR on the
-    # configure command line.
-    for var in AS CC CXX CPP LD AR RANLIB STRIP; do
-        ac_configure_args="$ac_configure_args $var='`eval echo \\${${var}}`'"
-    done
-    # A configure script generated by autoconf 2.68 warns if --host is
-    # specified but --build isn't. So we always pass --build to NSPR's
-    # configure script.
-    ac_configure_args="$ac_configure_args --build=$build"
-    ac_configure_args="$ac_configure_args $NSPR_CONFIGURE_ARGS"
-
-    # Save these, so we can mess with them for the subconfigure ..
-    _SAVE_CFLAGS="$CFLAGS"
-    _SAVE_CPPFLAGS="$CPPFLAGS"
-    _SAVE_LDFLAGS="$LDFLAGS"
-
-    if test -n "$MOZ_LINKER" -a "$ac_cv_func_dladdr" = no ; then
-      # dladdr is supported by the new linker, even when the system linker doesn't
-      # support it. Trick nspr into using dladdr when it's not supported.
-      export CPPFLAGS="-include $_topsrcdir/mozglue/linker/dladdr.h $CPPFLAGS"
-    fi
-    export LDFLAGS="$LDFLAGS $NSPR_LDFLAGS"
-    export CFLAGS="$CFLAGS $MOZ_FRAMEPTR_FLAGS"
-
-    # Use a separate cache file for NSPR since it uses autoconf 2.68.
-    _save_cache_file="$cache_file"
-    cache_file=$_objdir/nsprpub/config.cache
-
-    AC_OUTPUT_SUBDIRS(nsprpub)
-
-    # .. and restore them
-    cache_file="$_save_cache_file"
-    CFLAGS="$_SAVE_CFLAGS"
-    CPPFLAGS="$_SAVE_CPPFLAGS"
-    LDFLAGS="$_SAVE_LDFLAGS"
-
-    ac_configure_args="$_SUBDIR_CONFIG_ARGS"
-fi
 
 dnl ========================================================
 dnl = Setup a nice relatively clean build environment for
 dnl = sub-configures.
 dnl ========================================================
 CC="$_SUBDIR_CC"
 CXX="$_SUBDIR_CXX"
 CFLAGS="$_SUBDIR_CFLAGS"
@@ -9257,20 +9157,24 @@ HOST_CXXFLAGS="$_SUBDIR_HOST_CXXFLAGS"
 HOST_LDFLAGS="$_SUBDIR_HOST_LDFLAGS"
 RC=
 
 if test -n "$ENABLE_CLANG_PLUGIN"; then
     ac_configure_args="$_SUBDIR_CONFIG_ARGS"
     AC_OUTPUT_SUBDIRS(build/clang-plugin)
 fi
 
-
 # Run the SpiderMonkey 'configure' script.
 dist=$MOZ_BUILD_ROOT/dist
 ac_configure_args="$_SUBDIR_CONFIG_ARGS"
+
+# --with-system-nspr will have been converted into the relevant $NSPR_CFLAGS
+# and $NSPR_LIBS.
+ac_configure_args="`echo $ac_configure_args | sed -e 's/--with-system-nspr\S* *//'`"
+
 ac_configure_args="$ac_configure_args --enable-threadsafe"
 
 if test "$_INTL_API" = no; then
     ac_configure_args="$ac_configure_args --without-intl-api"
 fi
 
 if test "$BUILD_CTYPES"; then
     # Build js-ctypes on the platforms we can.
@@ -9283,17 +9187,17 @@ if test -z "$JS_SHARED_LIBRARY" ; then
     fi
 fi
 if test -z "$JSGC_USE_EXACT_ROOTING" ; then
     ac_configure_args="$ac_configure_args --disable-exact-rooting"
 fi
 if test -z "$JSGC_GENERATIONAL" ; then
     ac_configure_args="$ac_configure_args --disable-gcgenerational"
 fi
-if test -z "$MOZ_NATIVE_NSPR"; then
+if test -n "$NSPR_CFLAGS" -o -n "$NSPR_LIBS"; then
     ac_configure_args="$ac_configure_args --with-nspr-cflags='$NSPR_CFLAGS'"
     ac_configure_args="$ac_configure_args --with-nspr-libs='$NSPR_LIBS'"
 fi
 ac_configure_args="$ac_configure_args --prefix=$dist"
 if test "$MOZ_MEMORY"; then
    ac_configure_args="$ac_configure_args --enable-jemalloc"
 fi
 if test -n "$MOZ_GLUE_LDFLAGS"; then
--- a/content/base/public/nsContentUtils.h
+++ b/content/base/public/nsContentUtils.h
@@ -2035,18 +2035,21 @@ public:
   };
   /**
    * Parses the value of the autocomplete attribute into aResult, ensuring it's
    * composed of valid tokens, otherwise the value "" is used.
    * Note that this method is used for form fields, not on a <form> itself.
    *
    * @return whether aAttr was valid and can be cached.
    */
-  static AutocompleteAttrState SerializeAutocompleteAttribute(const nsAttrValue* aAttr,
-                                                          nsAString& aResult);
+  static AutocompleteAttrState
+  SerializeAutocompleteAttribute(const nsAttrValue* aAttr,
+                                 nsAString& aResult,
+                                 AutocompleteAttrState aCachedState =
+                                   eAutocompleteAttrState_Unknown);
 
   /**
    * This will parse aSource, to extract the value of the pseudo attribute
    * with the name specified in aName. See
    * http://www.w3.org/TR/xml-stylesheet/#NT-StyleSheetPI for the specification
    * which is used to parse aSource.
    *
    * @param aSource the string to parse
--- a/content/base/src/nsContentUtils.cpp
+++ b/content/base/src/nsContentUtils.cpp
@@ -850,35 +850,53 @@ nsContentUtils::IsAutocompleteEnabled(ns
     form->GetAutocomplete(autocomplete);
   }
 
   return !autocomplete.EqualsLiteral("off");
 }
 
 nsContentUtils::AutocompleteAttrState
 nsContentUtils::SerializeAutocompleteAttribute(const nsAttrValue* aAttr,
-                                           nsAString& aResult)
-{
+                                               nsAString& aResult,
+                                               AutocompleteAttrState aCachedState)
+{
+  if (!aAttr ||
+      aCachedState == nsContentUtils::eAutocompleteAttrState_Invalid) {
+    return aCachedState;
+  }
+
+  if (aCachedState == nsContentUtils::eAutocompleteAttrState_Valid) {
+    uint32_t atomCount = aAttr->GetAtomCount();
+    for (uint32_t i = 0; i < atomCount; i++) {
+      if (i != 0) {
+        aResult.Append(' ');
+      }
+      aResult.Append(nsDependentAtomString(aAttr->AtomAt(i)));
+    }
+    nsContentUtils::ASCIIToLower(aResult);
+    return aCachedState;
+  }
+
   AutocompleteAttrState state = InternalSerializeAutocompleteAttribute(aAttr, aResult);
   if (state == eAutocompleteAttrState_Valid) {
     ASCIIToLower(aResult);
   } else {
     aResult.Truncate();
   }
   return state;
 }
 
 /**
  * Helper to validate the @autocomplete tokens.
  *
  * @return {AutocompleteAttrState} The state of the attribute (invalid/valid).
  */
 nsContentUtils::AutocompleteAttrState
 nsContentUtils::InternalSerializeAutocompleteAttribute(const nsAttrValue* aAttrVal,
-                                                   nsAString& aResult)
+                                                       nsAString& aResult)
 {
   // No sandbox attribute so we are done
   if (!aAttrVal) {
     return eAutocompleteAttrState_Invalid;
   }
 
   uint32_t numTokens = aAttrVal->GetAtomCount();
   if (!numTokens) {
--- a/content/html/content/src/HTMLInputElement.cpp
+++ b/content/html/content/src/HTMLInputElement.cpp
@@ -1515,33 +1515,20 @@ NS_IMPL_STRING_ATTR(HTMLInputElement, Pl
 NS_IMPL_ENUM_ATTR_DEFAULT_VALUE(HTMLInputElement, Type, type,
                                 kInputDefaultType->tag)
 
 NS_IMETHODIMP
 HTMLInputElement::GetAutocomplete(nsAString& aValue)
 {
   aValue.Truncate(0);
   const nsAttrValue* attributeVal = GetParsedAttr(nsGkAtoms::autocomplete);
-  if (!attributeVal ||
-      mAutocompleteAttrState == nsContentUtils::eAutocompleteAttrState_Invalid) {
-    return NS_OK;
-  }
-  if (mAutocompleteAttrState == nsContentUtils::eAutocompleteAttrState_Valid) {
-    uint32_t atomCount = attributeVal->GetAtomCount();
-    for (uint32_t i = 0; i < atomCount; i++) {
-      if (i != 0) {
-        aValue.Append(' ');
-      }
-      aValue.Append(nsDependentAtomString(attributeVal->AtomAt(i)));
-    }
-    nsContentUtils::ASCIIToLower(aValue);
-    return NS_OK;
-  }
-
-  mAutocompleteAttrState = nsContentUtils::SerializeAutocompleteAttribute(attributeVal, aValue);
+
+  mAutocompleteAttrState =
+    nsContentUtils::SerializeAutocompleteAttribute(attributeVal, aValue,
+                                                   mAutocompleteAttrState);
   return NS_OK;
 }
 
 NS_IMETHODIMP
 HTMLInputElement::SetAutocomplete(const nsAString& aValue)
 {
   return SetAttr(kNameSpaceID_None, nsGkAtoms::autocomplete, nullptr, aValue, true);
 }
--- a/content/html/content/src/HTMLSelectElement.cpp
+++ b/content/html/content/src/HTMLSelectElement.cpp
@@ -99,16 +99,17 @@ SafeOptionListMutation::~SafeOptionListM
 
 // construction, destruction
 
 
 HTMLSelectElement::HTMLSelectElement(already_AddRefed<nsINodeInfo>& aNodeInfo,
                                      FromParser aFromParser)
   : nsGenericHTMLFormElementWithState(aNodeInfo),
     mOptions(new HTMLOptionsCollection(MOZ_THIS_IN_INITIALIZER_LIST())),
+    mAutocompleteAttrState(nsContentUtils::eAutocompleteAttrState_Unknown),
     mIsDoneAddingChildren(!aFromParser),
     mDisabledChanged(false),
     mMutating(false),
     mInhibitStateRestoration(!!(aFromParser & FROM_PARSER_FRAGMENT)),
     mSelectionHasChanged(false),
     mDefaultSelectionSet(false),
     mCanShowInvalidUI(true),
     mCanShowValidUI(true),
@@ -172,16 +173,26 @@ HTMLSelectElement::SetCustomValidity(con
 {
   nsIConstraintValidation::SetCustomValidity(aError);
 
   UpdateState(true);
 
   return NS_OK;
 }
 
+void
+HTMLSelectElement::GetAutocomplete(DOMString& aValue)
+{
+  const nsAttrValue* attributeVal = GetParsedAttr(nsGkAtoms::autocomplete);
+
+  mAutocompleteAttrState =
+    nsContentUtils::SerializeAutocompleteAttribute(attributeVal, aValue,
+                                                   mAutocompleteAttrState);
+}
+
 NS_IMETHODIMP
 HTMLSelectElement::GetForm(nsIDOMHTMLFormElement** aForm)
 {
   return nsGenericHTMLFormElementWithState::GetForm(aForm);
 }
 
 nsresult
 HTMLSelectElement::InsertChildAt(nsIContent* aKid,
@@ -1328,16 +1339,19 @@ nsresult
 HTMLSelectElement::AfterSetAttr(int32_t aNameSpaceID, nsIAtom* aName,
                                 const nsAttrValue* aValue, bool aNotify)
 {
   if (aNameSpaceID == kNameSpaceID_None) {
     if (aName == nsGkAtoms::disabled) {
       UpdateBarredFromConstraintValidation();
     } else if (aName == nsGkAtoms::required) {
       UpdateValueMissingValidityState();
+    } else if (aName == nsGkAtoms::autocomplete) {
+      // Clear the cached @autocomplete attribute state
+      mAutocompleteAttrState = nsContentUtils::eAutocompleteAttrState_Unknown;
     }
 
     UpdateState(aNotify);
   }
 
   return nsGenericHTMLFormElementWithState::AfterSetAttr(aNameSpaceID, aName,
                                                          aValue, aNotify);
 }
@@ -1415,18 +1429,23 @@ HTMLSelectElement::DoneAddingChildren(bo
 }
 
 bool
 HTMLSelectElement::ParseAttribute(int32_t aNamespaceID,
                                   nsIAtom* aAttribute,
                                   const nsAString& aValue,
                                   nsAttrValue& aResult)
 {
-  if (aAttribute == nsGkAtoms::size && kNameSpaceID_None == aNamespaceID) {
-    return aResult.ParsePositiveIntValue(aValue);
+  if (kNameSpaceID_None == aNamespaceID) {
+    if (aAttribute == nsGkAtoms::size) {
+      return aResult.ParsePositiveIntValue(aValue);
+    } else if (aAttribute == nsGkAtoms::autocomplete) {
+      aResult.ParseAtomArray(aValue);
+      return true;
+    }
   }
   return nsGenericHTMLElement::ParseAttribute(aNamespaceID, aAttribute, aValue,
                                               aResult);
 }
 
 void
 HTMLSelectElement::MapAttributesIntoRule(const nsMappedAttributes* aAttributes,
                                          nsRuleData* aData)
--- a/content/html/content/src/HTMLSelectElement.h
+++ b/content/html/content/src/HTMLSelectElement.h
@@ -12,16 +12,17 @@
 
 #include "mozilla/dom/BindingDeclarations.h"
 #include "mozilla/dom/HTMLOptionsCollection.h"
 #include "mozilla/ErrorResult.h"
 #include "nsCheapSets.h"
 #include "nsCOMPtr.h"
 #include "nsError.h"
 #include "mozilla/dom/HTMLFormElement.h"
+#include "nsContentUtils.h"
 
 class nsContentList;
 class nsIDOMHTMLOptionElement;
 class nsIHTMLCollection;
 class nsISelectControlFrame;
 class nsPresState;
 
 namespace mozilla {
@@ -154,16 +155,21 @@ public:
   bool Autofocus() const
   {
     return GetBoolAttr(nsGkAtoms::autofocus);
   }
   void SetAutofocus(bool aVal, ErrorResult& aRv)
   {
     SetHTMLBoolAttr(nsGkAtoms::autofocus, aVal, aRv);
   }
+  void GetAutocomplete(DOMString& aValue);
+  void SetAutocomplete(const nsAString& aValue, ErrorResult& aRv)
+  {
+    SetHTMLAttr(nsGkAtoms::autocomplete, aValue, aRv);
+  }
   bool Disabled() const
   {
     return GetBoolAttr(nsGkAtoms::disabled);
   }
   void SetDisabled(bool aVal, ErrorResult& aRv)
   {
     SetHTMLBoolAttr(nsGkAtoms::disabled, aVal, aRv);
   }
@@ -600,16 +606,17 @@ protected:
       return true;
     }
 
     return mSelectionHasChanged;
   }
 
   /** The options[] array */
   nsRefPtr<HTMLOptionsCollection> mOptions;
+  nsContentUtils::AutocompleteAttrState mAutocompleteAttrState;
   /** false if the parser is in the middle of adding children. */
   bool            mIsDoneAddingChildren;
   /** true if our disabled state has changed from the default **/
   bool            mDisabledChanged;
   /** true if child nodes are being added or removed.
    *  Used by SafeOptionListMutation.
    */
   bool            mMutating;
--- a/content/html/content/test/forms/test_input_autocomplete.html
+++ b/content/html/content/test/forms/test_input_autocomplete.html
@@ -8,17 +8,18 @@ Test @autocomplete on <input>
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" href="/tests/SimpleTest/test.css"/>
 </head>
 
 <body>
 <p id="display"></p>
 <div id="content" style="display: none">
   <form>
-    <input id="field" />
+    <input id="input-field" />
+    <select id="select-field" />
   </form>
 </div>
 <pre id="test">
 <script>
 "use strict";
 
 var values = [
   // @autocomplete content attribute, expected IDL attribute value
@@ -64,38 +65,41 @@ var values = [
   // Four tokens (invalid)
   ["billing billing mobile tel", ""],
 
   // Five tokens (invalid)
   ["billing billing billing mobile tel", ""],
 ];
 
 var types = [undefined, "hidden", "text", "search"]; // Valid types for all non-multiline hints.
-var field = document.getElementById("field");
 
-function checkAutocompleteValues(type) {
+function checkAutocompleteValues(field, type) {
   for (var test of values) {
     if (typeof(test[0]) === "undefined")
       field.removeAttribute("autocomplete");
     else
       field.setAttribute("autocomplete", test[0]);
     ise(field.autocomplete, test[1], "Checking @autocomplete for @type=" + type + " of: " + test[0]);
     ise(field.autocomplete, test[1], "Checking cached @autocomplete for @type=" + type + " of: " + test[0]);
   }
 }
 
 function start() {
+  var inputField = document.getElementById("input-field");
   for (var type of types) {
     // Switch the input type
     if (typeof(type) === "undefined")
-      field.removeAttribute("type");
+      inputField.removeAttribute("type");
     else
-      field.type = type;
-    checkAutocompleteValues(type || "");
+      inputField.type = type;
+    checkAutocompleteValues(inputField, type || "");
   }
+
+  var selectField = document.getElementById("select-field");
+  checkAutocompleteValues(selectField, "select");
   SimpleTest.finish();
 }
 
 SimpleTest.waitForExplicitFinish();
 SpecialPowers.pushPrefEnv({"set": [["dom.forms.autocomplete.experimental", true]]}, start);
 
 </script>
 </pre>
--- a/content/html/content/test/test_bug615833.html
+++ b/content/html/content/test/test_bug615833.html
@@ -55,87 +55,99 @@ function checkChangeEvent(aEvent)
 selectMultiple.addEventListener("change", function(aEvent) {
   selectMultiple.removeEventListener("change", arguments.callee, false);
   checkChangeEvent(aEvent);
   SimpleTest.finish();
 }, false);
 
 selectMultiple.addEventListener("focus", function() {
   selectMultiple.removeEventListener("focus", arguments.callee, false);
-  synthesizeMouseAtCenter(selectMultiple, {});
+  SimpleTest.executeSoon(function () {
+    synthesizeMouseAtCenter(selectMultiple, {});
+  });
 }, false);
 
 select.addEventListener("change", function(aEvent) {
   select.removeEventListener("change", arguments.callee, false);
   checkChangeEvent(aEvent);
   selectMultiple.focus();
 }, false);
 
 select.addEventListener("keyup", function() {
   select.removeEventListener("keyup", arguments.callee, false);
   select.blur();
 }, false);
 
 select.addEventListener("focus", function() {
   select.removeEventListener("focus", arguments.callee, false);
-  synthesizeKey("VK_DOWN", {});
+  SimpleTest.executeSoon(function () {
+    synthesizeKey("VK_DOWN", {});
+  });
 }, false);
 
 checkbox.addEventListener("change", function(aEvent) {
   checkbox.removeEventListener("change", arguments.callee, false);
   checkChangeEvent(aEvent);
   select.focus();
 }, false);
 
 checkbox.addEventListener("focus", function() {
   checkbox.removeEventListener("focus", arguments.callee, false);
-  synthesizeMouseAtCenter(checkbox, {});
+  SimpleTest.executeSoon(function () {
+    synthesizeMouseAtCenter(checkbox, {});
+  });
 }, false);
 
 radio.addEventListener("change", function(aEvent) {
   radio.removeEventListener("change", arguments.callee, false);
   checkChangeEvent(aEvent);
   checkbox.focus();
 }, false);
 
 radio.addEventListener("focus", function() {
   radio.removeEventListener("focus", arguments.callee, false);
-  synthesizeMouseAtCenter(radio, {});
+  SimpleTest.executeSoon(function () {
+    synthesizeMouseAtCenter(radio, {});
+  });
 }, false);
 
 textarea.addEventListener("change", function(aEvent) {
   textarea.removeEventListener("change", arguments.callee, false);
   checkChangeEvent(aEvent);
   radio.focus();
 }, false);
 
 textarea.addEventListener("input", function() {
   textarea.removeEventListener("input", arguments.callee, false);
   textarea.blur();
 }, false);
 
 textarea.addEventListener("focus", function() {
   textarea.removeEventListener("focus", arguments.callee, false);
-  synthesizeKey('f', {});
+  SimpleTest.executeSoon(function () {
+    synthesizeKey('f', {});
+  });
 }, false);
 
 input.addEventListener("change", function(aEvent) {
   input.removeEventListener("change", arguments.callee, false);
   checkChangeEvent(aEvent);
   textarea.focus();
 }, false);
 
 input.addEventListener("input", function() {
   input.removeEventListener("input", arguments.callee, false);
   input.blur();
 }, false);
 
 input.addEventListener("focus", function() {
   input.removeEventListener("focus", arguments.callee, false);
-  synthesizeKey('f', {});
+  SimpleTest.executeSoon(function () {
+    synthesizeKey('f', {});
+  });
 }, false);
 
 SimpleTest.waitForExplicitFinish();
 SimpleTest.waitForFocus(function() {
   input.focus();
 });
 
 </script>
--- a/content/media/MediaData.h
+++ b/content/media/MediaData.h
@@ -32,32 +32,37 @@ public:
   MediaData(Type aType,
             int64_t aOffset,
             int64_t aTimestamp,
             int64_t aDuration)
     : mType(aType)
     , mOffset(aOffset)
     , mTime(aTimestamp)
     , mDuration(aDuration)
+    , mDiscontinuity(false)
   {}
 
   virtual ~MediaData() {}
 
   // Type of contained data.
   const Type mType;
 
   // Approximate byte offset where this data was demuxed from its media.
   const int64_t mOffset;
 
   // Start time of sample, in microseconds.
   const int64_t mTime;
 
   // Duration of sample, in microseconds.
   const int64_t mDuration;
 
+  // True if this is the first sample after a gap or discontinuity in
+  // the stream. This is true for the first sample in a stream after a seek.
+  bool mDiscontinuity;
+
   int64_t GetEndTime() const { return mTime + mDuration; }
 
 };
 
 // Holds chunk a decoded audio frames.
 class AudioData : public MediaData {
 public:
 
@@ -202,17 +207,17 @@ public:
   // specified timestamp. All data from aOther is copied into the new
   // VideoData, as ShallowCopyUpdateDuration() does.
   static VideoData* ShallowCopyUpdateTimestamp(VideoData* aOther,
                                                int64_t aTimestamp);
 
   // Initialize PlanarYCbCrImage. Only When aCopyData is true,
   // video data is copied to PlanarYCbCrImage.
   static void SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
-                                  VideoInfo& aInfo,                  
+                                  VideoInfo& aInfo,
                                   const YCbCrBuffer &aBuffer,
                                   const IntRect& aPicture,
                                   bool aCopyData);
 
   // Constructs a duplicate VideoData object. This intrinsically tells the
   // player that it does not need to update the displayed frame when this
   // frame is played; this frame is identical to the previous.
   static VideoData* CreateDuplicate(int64_t aOffset,
new file mode 100644
--- /dev/null
+++ b/content/media/MediaDataDecodedListener.h
@@ -0,0 +1,148 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MediaDataDecodedListener_h_
+#define MediaDataDecodedListener_h_
+
+#include "mozilla/Monitor.h"
+#include "MediaDecoderReader.h"
+
+namespace mozilla {
+
+class MediaDecoderStateMachine;
+class MediaData;
+
+// A RequestSampleCallback implementation that forwards samples onto the
+// MediaDecoderStateMachine via tasks that run on the supplied task queue.
+template<class Target>
+class MediaDataDecodedListener : public RequestSampleCallback {
+public:
+  MediaDataDecodedListener(Target* aTarget,
+                           MediaTaskQueue* aTaskQueue)
+    : mMonitor("MediaDataDecodedListener")
+    , mTaskQueue(aTaskQueue)
+    , mTarget(aTarget)
+  {
+    MOZ_ASSERT(aTarget);
+    MOZ_ASSERT(aTaskQueue);
+  }
+
+  virtual void OnAudioDecoded(AudioData* aSample) MOZ_OVERRIDE {
+    MonitorAutoLock lock(mMonitor);
+    nsAutoPtr<AudioData> sample(aSample);
+    if (!mTarget || !mTaskQueue) {
+      // We've been shutdown, abort.
+      return;
+    }
+    RefPtr<nsIRunnable> task(new DeliverAudioTask(sample.forget(), mTarget));
+    mTaskQueue->Dispatch(task);
+  }
+
+  virtual void OnAudioEOS() MOZ_OVERRIDE {
+    MonitorAutoLock lock(mMonitor);
+    if (!mTarget || !mTaskQueue) {
+      // We've been shutdown, abort.
+      return;
+    }
+    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnAudioEOS));
+    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
+      NS_WARNING("Failed to dispatch OnAudioEOS task");
+    }
+  }
+
+  virtual void OnVideoDecoded(VideoData* aSample) MOZ_OVERRIDE {
+    MonitorAutoLock lock(mMonitor);
+    nsAutoPtr<VideoData> sample(aSample);
+    if (!mTarget || !mTaskQueue) {
+      // We've been shutdown, abort.
+      return;
+    }
+    RefPtr<nsIRunnable> task(new DeliverVideoTask(sample.forget(), mTarget));
+    mTaskQueue->Dispatch(task);
+  }
+
+  virtual void OnVideoEOS() MOZ_OVERRIDE {
+    MonitorAutoLock lock(mMonitor);
+    if (!mTarget || !mTaskQueue) {
+      // We've been shutdown, abort.
+      return;
+    }
+    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnVideoEOS));
+    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
+      NS_WARNING("Failed to dispatch OnVideoEOS task");
+    }
+  }
+
+  virtual void OnDecodeError() MOZ_OVERRIDE {
+    MonitorAutoLock lock(mMonitor);
+    if (!mTarget || !mTaskQueue) {
+      // We've been shutdown, abort.
+      return;
+    }
+    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnDecodeError));
+    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
+      NS_WARNING("Failed to dispatch OnDecodeError task");
+    }
+  }
+
+  void BreakCycles() {
+    MonitorAutoLock lock(mMonitor);
+    mTarget = nullptr;
+    mTaskQueue = nullptr;
+  }
+
+private:
+
+  class DeliverAudioTask : public nsRunnable {
+  public:
+    DeliverAudioTask(AudioData* aSample, Target* aTarget)
+      : mSample(aSample)
+      , mTarget(aTarget)
+    {
+      MOZ_COUNT_CTOR(DeliverAudioTask);
+    }
+    ~DeliverAudioTask()
+    {
+      MOZ_COUNT_DTOR(DeliverAudioTask);
+    }
+    NS_METHOD Run() {
+      mTarget->OnAudioDecoded(mSample.forget());
+      return NS_OK;
+    }
+  private:
+    nsAutoPtr<AudioData> mSample;
+    RefPtr<Target> mTarget;
+  };
+
+  class DeliverVideoTask : public nsRunnable {
+  public:
+    DeliverVideoTask(VideoData* aSample, Target* aTarget)
+      : mSample(aSample)
+      , mTarget(aTarget)
+    {
+      MOZ_COUNT_CTOR(DeliverVideoTask);
+    }
+    ~DeliverVideoTask()
+    {
+      MOZ_COUNT_DTOR(DeliverVideoTask);
+    }
+    NS_METHOD Run() {
+      mTarget->OnVideoDecoded(mSample.forget());
+      return NS_OK;
+    }
+  private:
+    nsAutoPtr<VideoData> mSample;
+    RefPtr<Target> mTarget;
+  };
+
+  Monitor mMonitor;
+  RefPtr<MediaTaskQueue> mTaskQueue;
+  RefPtr<Target> mTarget;
+};
+
+}
+
+#endif // MediaDataDecodedListener_h_
--- a/content/media/MediaDecoder.cpp
+++ b/content/media/MediaDecoder.cpp
@@ -1523,17 +1523,17 @@ bool MediaDecoder::IsShutdown() const {
 }
 
 int64_t MediaDecoder::GetEndMediaTime() const {
   NS_ENSURE_TRUE(GetStateMachine(), -1);
   return GetStateMachine()->GetEndMediaTime();
 }
 
 // Drop reference to state machine.  Only called during shutdown dance.
-void MediaDecoder::ReleaseStateMachine() {
+void MediaDecoder::BreakCycles() {
   mDecoderStateMachine = nullptr;
 }
 
 MediaDecoderOwner* MediaDecoder::GetMediaOwner() const
 {
   return mOwner;
 }
 
--- a/content/media/MediaDecoder.h
+++ b/content/media/MediaDecoder.h
@@ -1,19 +1,19 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 /*
 Each video element based on MediaDecoder has a state machine to manage
 its play state and keep the current frame up to date. All state machines
-share time in a single shared thread. Each decoder also has one thread
-dedicated to decoding audio and video data. This thread is shutdown when
-playback is paused. Each decoder also has a thread to push decoded audio
+share time in a single shared thread. Each decoder also has a MediaTaskQueue
+running in a SharedThreadPool to decode audio and video data.
+Each decoder also has a thread to push decoded audio
 to the hardware. This thread is not created until playback starts, but
 currently is not destroyed when paused, only when playback ends.
 
 The decoder owns the resources for downloading the media file, and the
 high level state. It holds an owning reference to the state machine that
 owns all the resources related to decoding data, and manages the low level
 decoding operations and A/V sync.
 
@@ -229,16 +229,21 @@ struct SeekTarget {
     , mType(SeekTarget::Invalid)
   {
   }
   SeekTarget(int64_t aTimeUsecs, Type aType)
     : mTime(aTimeUsecs)
     , mType(aType)
   {
   }
+  SeekTarget(const SeekTarget& aOther)
+    : mTime(aOther.mTime)
+    , mType(aOther.mType)
+  {
+  }
   bool IsValid() const {
     return mType != SeekTarget::Invalid;
   }
   void Reset() {
     mTime = -1;
     mType = SeekTarget::Invalid;
   }
   // Seek target time in microseconds.
@@ -819,17 +824,17 @@ public:
   // Updates the approximate byte offset which playback has reached. This is
   // used to calculate the readyState transitions.
   void UpdatePlaybackOffset(int64_t aOffset);
 
   // Provide access to the state machine object
   MediaDecoderStateMachine* GetStateMachine() const;
 
   // Drop reference to state machine.  Only called during shutdown dance.
-  virtual void ReleaseStateMachine();
+  virtual void BreakCycles();
 
   // Notifies the element that decoding has failed.
   virtual void DecodeError();
 
   // Indicate whether the media is same-origin with the element.
   void UpdateSameOriginStatus(bool aSameOrigin);
 
   MediaDecoderOwner* GetOwner() MOZ_OVERRIDE;
--- a/content/media/MediaDecoderReader.cpp
+++ b/content/media/MediaDecoderReader.cpp
@@ -58,19 +58,21 @@ public:
     mSize += audioData->SizeOfIncludingThis(MallocSizeOf);
     return nullptr;
   }
 
   size_t mSize;
 };
 
 MediaDecoderReader::MediaDecoderReader(AbstractMediaDecoder* aDecoder)
-  : mAudioCompactor(mAudioQueue),
-    mDecoder(aDecoder),
-    mIgnoreAudioOutputFormat(false)
+  : mAudioCompactor(mAudioQueue)
+  , mDecoder(aDecoder)
+  , mIgnoreAudioOutputFormat(false)
+  , mAudioDiscontinuity(false)
+  , mVideoDiscontinuity(false)
 {
   MOZ_COUNT_CTOR(MediaDecoderReader);
 }
 
 MediaDecoderReader::~MediaDecoderReader()
 {
   ResetDecode();
   MOZ_COUNT_DTOR(MediaDecoderReader);
@@ -92,16 +94,19 @@ size_t MediaDecoderReader::SizeOfAudioQu
 
 nsresult MediaDecoderReader::ResetDecode()
 {
   nsresult res = NS_OK;
 
   VideoQueue().Reset();
   AudioQueue().Reset();
 
+  mAudioDiscontinuity = true;
+  mVideoDiscontinuity = true;
+
   return res;
 }
 
 VideoData* MediaDecoderReader::DecodeToFirstVideoData()
 {
   bool eof = false;
   while (!eof && VideoQueue().GetSize() == 0) {
     {
@@ -168,186 +173,193 @@ VideoData* MediaDecoderReader::FindStart
   int64_t startTime = std::min(videoStartTime, audioStartTime);
   if (startTime != INT64_MAX) {
     aOutStartTime = startTime;
   }
 
   return videoData;
 }
 
-nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
-{
-  DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));
-
-  // Decode forward to the target frame. Start with video, if we have it.
-  if (HasVideo()) {
-    // Note: when decoding hits the end of stream we must keep the last frame
-    // in the video queue so that we'll have something to display after the
-    // seek completes. This makes our logic a bit messy.
-    bool eof = false;
-    nsAutoPtr<VideoData> video;
-    while (HasVideo() && !eof) {
-      while (VideoQueue().GetSize() == 0 && !eof) {
-        bool skip = false;
-        eof = !DecodeVideoFrame(skip, 0);
-        {
-          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
-          if (mDecoder->IsShutdown()) {
-            return NS_ERROR_FAILURE;
-          }
-        }
-      }
-      if (eof) {
-        // Hit end of file, we want to display the last frame of the video.
-        if (video) {
-          DECODER_LOG(PR_LOG_DEBUG,
-            ("MediaDecoderReader::DecodeToTarget(%lld) repushing video frame [%lld, %lld] at EOF",
-            aTarget, video->mTime, video->GetEndTime()));
-          VideoQueue().PushFront(video.forget());
-        }
-        VideoQueue().Finish();
-        break;
-      }
-      video = VideoQueue().PeekFront();
-      // If the frame end time is less than the seek target, we won't want
-      // to display this frame after the seek, so discard it.
-      if (video && video->GetEndTime() <= aTarget) {
-        DECODER_LOG(PR_LOG_DEBUG,
-                    ("MediaDecoderReader::DecodeToTarget(%lld) pop video frame [%lld, %lld]",
-                     aTarget, video->mTime, video->GetEndTime()));
-        VideoQueue().PopFront();
-      } else {
-        // Found a frame after or encompasing the seek target.
-        if (aTarget >= video->mTime && video->GetEndTime() >= aTarget) {
-          // The seek target lies inside this frame's time slice. Adjust the frame's
-          // start time to match the seek target. We do this by replacing the
-          // first frame with a shallow copy which has the new timestamp.
-          VideoQueue().PopFront();
-          VideoData* temp = VideoData::ShallowCopyUpdateTimestamp(video, aTarget);
-          video = temp;
-          VideoQueue().PushFront(video);
-        }
-        DECODER_LOG(PR_LOG_DEBUG,
-                    ("MediaDecoderReader::DecodeToTarget(%lld) found target video frame [%lld,%lld]",
-                     aTarget, video->mTime, video->GetEndTime()));
-
-        video.forget();
-        break;
-      }
-    }
-    {
-      ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
-      if (mDecoder->IsShutdown()) {
-        return NS_ERROR_FAILURE;
-      }
-    }
-#ifdef PR_LOGGING
-    const VideoData* front =  VideoQueue().PeekFront();
-    DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld",
-                front ? front->mTime : -1));
-#endif
-  }
-
-  if (HasAudio()) {
-    // Decode audio forward to the seek target.
-    bool eof = false;
-    while (HasAudio() && !eof) {
-      while (!eof && AudioQueue().GetSize() == 0) {
-        eof = !DecodeAudioData();
-        {
-          ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
-          if (mDecoder->IsShutdown()) {
-            return NS_ERROR_FAILURE;
-          }
-        }
-      }
-      const AudioData* audio = AudioQueue().PeekFront();
-      if (!audio || eof) {
-        AudioQueue().Finish();
-        break;
-      }
-      CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
-      CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
-      if (!startFrame.isValid() || !targetFrame.isValid()) {
-        return NS_ERROR_FAILURE;
-      }
-      if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
-        // Our seek target lies after the frames in this AudioData. Pop it
-        // off the queue, and keep decoding forwards.
-        delete AudioQueue().PopFront();
-        audio = nullptr;
-        continue;
-      }
-      if (startFrame.value() > targetFrame.value()) {
-        // The seek target doesn't lie in the audio block just after the last
-        // audio frames we've seen which were before the seek target. This
-        // could have been the first audio data we've seen after seek, i.e. the
-        // seek terminated after the seek target in the audio stream. Just
-        // abort the audio decode-to-target, the state machine will play
-        // silence to cover the gap. Typically this happens in poorly muxed
-        // files.
-        NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
-        break;
-      }
-
-      // The seek target lies somewhere in this AudioData's frames, strip off
-      // any frames which lie before the seek target, so we'll begin playback
-      // exactly at the seek target.
-      NS_ASSERTION(targetFrame.value() >= startFrame.value(),
-                   "Target must at or be after data start.");
-      NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
-                   "Data must end after target.");
-
-      int64_t framesToPrune = targetFrame.value() - startFrame.value();
-      if (framesToPrune > audio->mFrames) {
-        // We've messed up somehow. Don't try to trim frames, the |frames|
-        // variable below will overflow.
-        NS_WARNING("Can't prune more frames that we have!");
-        break;
-      }
-      uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
-      uint32_t channels = audio->mChannels;
-      nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
-      memcpy(audioData.get(),
-             audio->mAudioData.get() + (framesToPrune * channels),
-             frames * channels * sizeof(AudioDataValue));
-      CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
-      if (!duration.isValid()) {
-        return NS_ERROR_FAILURE;
-      }
-      nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
-                                              aTarget,
-                                              duration.value(),
-                                              frames,
-                                              audioData.forget(),
-                                              channels));
-      delete AudioQueue().PopFront();
-      AudioQueue().PushFront(data.forget());
-      break;
-    }
-  }
-
-#ifdef PR_LOGGING
-  const VideoData* v = VideoQueue().PeekFront();
-  const AudioData* a = AudioQueue().PeekFront();
-  DECODER_LOG(PR_LOG_DEBUG,
-              ("MediaDecoderReader::DecodeToTarget(%lld) finished v=%lld a=%lld",
-              aTarget, v ? v->mTime : -1, a ? a->mTime : -1));
-#endif
-
-  return NS_OK;
-}
-
 nsresult
 MediaDecoderReader::GetBuffered(mozilla::dom::TimeRanges* aBuffered,
                                 int64_t aStartTime)
 {
   MediaResource* stream = mDecoder->GetResource();
   int64_t durationUs = 0;
   {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     durationUs = mDecoder->GetMediaDuration();
   }
   GetEstimatedBufferedTimeRanges(stream, durationUs, aBuffered);
   return NS_OK;
 }
 
+class RequestVideoWithSkipTask : public nsRunnable {
+public:
+  RequestVideoWithSkipTask(MediaDecoderReader* aReader,
+                           int64_t aTimeThreshold)
+    : mReader(aReader)
+    , mTimeThreshold(aTimeThreshold)
+  {
+  }
+  NS_METHOD Run() {
+    bool skip = true;
+    mReader->RequestVideoData(skip, mTimeThreshold);
+    return NS_OK;
+  }
+private:
+  nsRefPtr<MediaDecoderReader> mReader;
+  int64_t mTimeThreshold;
+};
+
+void
+MediaDecoderReader::RequestVideoData(bool aSkipToNextKeyframe,
+                                     int64_t aTimeThreshold)
+{
+  bool skip = aSkipToNextKeyframe;
+  while (VideoQueue().GetSize() == 0 &&
+         !VideoQueue().IsFinished()) {
+    if (!DecodeVideoFrame(skip, aTimeThreshold)) {
+      VideoQueue().Finish();
+    } else if (skip) {
+      // We still need to decode more data in order to skip to the next
+      // keyframe. Post another task to the decode task queue to decode
+      // again. We don't just decode straight in a loop here, as that
+      // would hog the decode task queue.
+      RefPtr<nsIRunnable> task(new RequestVideoWithSkipTask(this, aTimeThreshold));
+      mTaskQueue->Dispatch(task);
+      return;
+    }
+  }
+  if (VideoQueue().GetSize() > 0) {
+    VideoData* v = VideoQueue().PopFront();
+    if (v && mVideoDiscontinuity) {
+      v->mDiscontinuity = true;
+      mVideoDiscontinuity = false;
+    }
+    GetCallback()->OnVideoDecoded(v);
+  } else if (VideoQueue().IsFinished()) {
+    GetCallback()->OnVideoEOS();
+  }
+}
+
+void
+MediaDecoderReader::RequestAudioData()
+{
+  while (AudioQueue().GetSize() == 0 &&
+         !AudioQueue().IsFinished()) {
+    if (!DecodeAudioData()) {
+      AudioQueue().Finish();
+    }
+  }
+  if (AudioQueue().GetSize() > 0) {
+    AudioData* a = AudioQueue().PopFront();
+    if (mAudioDiscontinuity) {
+      a->mDiscontinuity = true;
+      mAudioDiscontinuity = false;
+    }
+    GetCallback()->OnAudioDecoded(a);
+    return;
+  } else if (AudioQueue().IsFinished()) {
+    GetCallback()->OnAudioEOS();
+    return;
+  }
+}
+
+void
+MediaDecoderReader::SetCallback(RequestSampleCallback* aCallback)
+{
+  mSampleDecodedCallback = aCallback;
+}
+
+void
+MediaDecoderReader::SetTaskQueue(MediaTaskQueue* aTaskQueue)
+{
+  mTaskQueue = aTaskQueue;
+}
+
+void
+MediaDecoderReader::BreakCycles()
+{
+  if (mSampleDecodedCallback) {
+    mSampleDecodedCallback->BreakCycles();
+    mSampleDecodedCallback = nullptr;
+  }
+  mTaskQueue = nullptr;
+}
+
+void
+MediaDecoderReader::Shutdown()
+{
+  ReleaseMediaResources();
+}
+
+AudioDecodeRendezvous::AudioDecodeRendezvous()
+  : mMonitor("AudioDecodeRendezvous")
+  , mHaveResult(false)
+{
+}
+
+AudioDecodeRendezvous::~AudioDecodeRendezvous()
+{
+}
+
+void
+AudioDecodeRendezvous::OnAudioDecoded(AudioData* aSample)
+{
+  MonitorAutoLock mon(mMonitor);
+  mSample = aSample;
+  mStatus = NS_OK;
+  mHaveResult = true;
+  mon.NotifyAll();
+}
+
+void
+AudioDecodeRendezvous::OnAudioEOS()
+{
+  MonitorAutoLock mon(mMonitor);
+  mSample = nullptr;
+  mStatus = NS_OK;
+  mHaveResult = true;
+  mon.NotifyAll();
+}
+
+void
+AudioDecodeRendezvous::OnDecodeError()
+{
+  MonitorAutoLock mon(mMonitor);
+  mSample = nullptr;
+  mStatus = NS_ERROR_FAILURE;
+  mHaveResult = true;
+  mon.NotifyAll();
+}
+
+void
+AudioDecodeRendezvous::Reset()
+{
+  MonitorAutoLock mon(mMonitor);
+  mHaveResult = false;
+  mStatus = NS_OK;
+  mSample = nullptr;
+}
+
+nsresult
+AudioDecodeRendezvous::Await(nsAutoPtr<AudioData>& aSample)
+{
+  MonitorAutoLock mon(mMonitor);
+  while (!mHaveResult) {
+    mon.Wait();
+  }
+  mHaveResult = false;
+  aSample = mSample;
+  return mStatus;
+}
+
+void
+AudioDecodeRendezvous::Cancel()
+{
+  MonitorAutoLock mon(mMonitor);
+  mStatus = NS_ERROR_ABORT;
+  mHaveResult = true;
+  mon.NotifyAll();
+}
+
 } // namespace mozilla
--- a/content/media/MediaDecoderReader.h
+++ b/content/media/MediaDecoderReader.h
@@ -13,63 +13,95 @@
 #include "AudioCompactor.h"
 
 namespace mozilla {
 
 namespace dom {
 class TimeRanges;
 }
 
-// Encapsulates the decoding and reading of media data. Reading can only be
-// done on the decode thread. Never hold the decoder monitor when
-// calling into this class. Unless otherwise specified, methods and fields of
-// this class can only be accessed on the decode thread.
+class RequestSampleCallback;
+
+// Encapsulates the decoding and reading of media data. Reading can either
+// synchronous and done on the calling "decode" thread, or asynchronous and
+// performed on a background thread, with the result being returned by
+// callback. Never hold the decoder monitor when calling into this class.
+// Unless otherwise specified, methods and fields of this class can only
+// be accessed on the decode task queue.
 class MediaDecoderReader {
 public:
+
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDecoderReader)
+
   MediaDecoderReader(AbstractMediaDecoder* aDecoder);
   virtual ~MediaDecoderReader();
 
   // Initializes the reader, returns NS_OK on success, or NS_ERROR_FAILURE
   // on failure.
   virtual nsresult Init(MediaDecoderReader* aCloneDonor) = 0;
 
   // True if this reader is waiting media resource allocation
   virtual bool IsWaitingMediaResources() { return false; }
   // True when this reader need to become dormant state
   virtual bool IsDormantNeeded() { return false; }
   // Release media resources they should be released in dormant state
+  // The reader can be made usable again by calling ReadMetadata().
   virtual void ReleaseMediaResources() {};
-  // Release the decoder during shutdown
-  virtual void ReleaseDecoder() {};
+  // Breaks reference-counted cycles. Called during shutdown.
+  // WARNING: If you override this, you must call the base implementation
+  // in your override.
+  virtual void BreakCycles();
+
+  // Destroys the decoding state. The reader cannot be made usable again.
+  // This is different from ReleaseMediaResources() as it is irreversible,
+  // whereas ReleaseMediaResources() is reversible.
+  virtual void Shutdown();
+
+  virtual void SetCallback(RequestSampleCallback* aDecodedSampleCallback);
+  virtual void SetTaskQueue(MediaTaskQueue* aTaskQueue);
 
   // Resets all state related to decoding, emptying all buffers etc.
+  // Cancels all pending Request*Data() request callbacks, and flushes the
+  // decode pipeline. The decoder must not call any of the callbacks for
+  // outstanding Request*Data() calls after this is called. Calls to
+  // Request*Data() made after this should be processed as usual.
+  // Normally this call precedes a Seek() call, or shutdown.
+  // The first samples of every stream produced after a ResetDecode() call
+  // *must* be marked as "discontinuities". If they're not, seeking won't
+  // work properly!
   virtual nsresult ResetDecode();
 
-  // Decodes an unspecified amount of audio data, enqueuing the audio data
-  // in mAudioQueue. Returns true when there's more audio to decode,
-  // false if the audio is finished, end of file has been reached,
-  // or an un-recoverable read error has occured.
-  virtual bool DecodeAudioData() = 0;
+  // Requests the Reader to call OnAudioDecoded() on aCallback with one
+  // audio sample. The decode should be performed asynchronously, and
+  // the callback can be performed on any thread. Don't hold the decoder
+  // monitor while calling this, as the implementation may try to wait
+  // on something that needs the monitor and deadlock.
+  virtual void RequestAudioData();
 
-  // Reads and decodes one video frame. Packets with a timestamp less
-  // than aTimeThreshold will be decoded (unless they're not keyframes
-  // and aKeyframeSkip is true), but will not be added to the queue.
-  virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
-                                int64_t aTimeThreshold) = 0;
+  // Requests the Reader to call OnVideoDecoded() on aCallback with one
+  // video sample. The decode should be performed asynchronously, and
+  // the callback can be performed on any thread. Don't hold the decoder
+  // monitor while calling this, as the implementation may try to wait
+  // on something that needs the monitor and deadlock.
+  // If aSkipToKeyframe is true, the decode should skip ahead to the
+  // the next keyframe at or after aTimeThreshold microseconds.
+  virtual void RequestVideoData(bool aSkipToNextKeyframe,
+                                int64_t aTimeThreshold);
 
   virtual bool HasAudio() = 0;
   virtual bool HasVideo() = 0;
 
   // Read header data for all bitstreams in the file. Fills aInfo with
   // the data required to present the media, and optionally fills *aTags
   // with tag metadata from the file.
   // Returns NS_OK on success, or NS_ERROR_FAILURE on failure.
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags) = 0;
 
+  // TODO: DEPRECATED. This uses synchronous decoding.
   // Stores the presentation time of the first frame we'd be able to play if
   // we started playback at the current position. Returns the first video
   // frame, if we have video.
   virtual VideoData* FindStartTime(int64_t& aOutStartTime);
 
   // Moves the decode head to aTime microseconds. aStartTime and aEndTime
   // denote the start and end times of the media in usecs, and aCurrentTime
   // is the current playback position in microseconds.
@@ -93,32 +125,16 @@ public:
   // Tell the reader that the data decoded are not for direct playback, so it
   // can accept more files, in particular those which have more channels than
   // available in the audio output.
   void SetIgnoreAudioOutputFormat()
   {
     mIgnoreAudioOutputFormat = true;
   }
 
-protected:
-  // Queue of audio frames. This queue is threadsafe, and is accessed from
-  // the audio, decoder, state machine, and main threads.
-  MediaQueue<AudioData> mAudioQueue;
-
-  // Queue of video frames. This queue is threadsafe, and is accessed from
-  // the decoder, state machine, and main threads.
-  MediaQueue<VideoData> mVideoQueue;
-
-  // An adapter to the audio queue which first copies data to buffers with
-  // minimal allocation slop and then pushes them to the queue.  This is
-  // useful for decoders working with formats that give awkward numbers of
-  // frames such as mp3.
-  AudioCompactor mAudioCompactor;
-
-public:
   // Populates aBuffered with the time ranges which are buffered. aStartTime
   // must be the presentation time of the first frame in the media, e.g.
   // the media time corresponding to playback time/position 0. This function
   // is called on the main, decode, and state machine threads.
   //
   // This base implementation in MediaDecoderReader estimates the time ranges
   // buffered by interpolating the cached byte ranges with the duration
   // of the media. Reader subclasses should override this method if they
@@ -151,32 +167,144 @@ public:
   // Returns a pointer to the decoder.
   AbstractMediaDecoder* GetDecoder() {
     return mDecoder;
   }
 
   AudioData* DecodeToFirstAudioData();
   VideoData* DecodeToFirstVideoData();
 
-  // Decodes samples until we reach frames required to play at time aTarget
-  // (usecs). This also trims the samples to start exactly at aTarget,
-  // by discarding audio samples and adjusting start times of video frames.
-  nsresult DecodeToTarget(int64_t aTarget);
-
   MediaInfo GetMediaInfo() { return mInfo; }
 
 protected:
 
+  // Overrides of this function should decode an unspecified amount of
+  // audio data, enqueuing the audio data in mAudioQueue. Returns true
+  // when there's more audio to decode, false if the audio is finished,
+  // end of file has been reached, or an un-recoverable read error has
+  // occurred. This function blocks until the decode is complete.
+  virtual bool DecodeAudioData() {
+    return false;
+  }
+
+  // Overrides of this function should read and decode one video frame.
+  // Packets with a timestamp less than aTimeThreshold will be decoded
+  // (unless they're not keyframes and aKeyframeSkip is true), but will
+  // not be added to the queue. This function blocks until the decode
+  // is complete.
+  virtual bool DecodeVideoFrame(bool &aKeyframeSkip, int64_t aTimeThreshold) {
+    return false;
+  }
+
+  RequestSampleCallback* GetCallback() {
+    MOZ_ASSERT(mSampleDecodedCallback);
+    return mSampleDecodedCallback;
+  }
+
+  virtual MediaTaskQueue* GetTaskQueue() {
+    return mTaskQueue;
+  }
+
+  // Queue of audio frames. This queue is threadsafe, and is accessed from
+  // the audio, decoder, state machine, and main threads.
+  MediaQueue<AudioData> mAudioQueue;
+
+  // Queue of video frames. This queue is threadsafe, and is accessed from
+  // the decoder, state machine, and main threads.
+  MediaQueue<VideoData> mVideoQueue;
+
+  // An adapter to the audio queue which first copies data to buffers with
+  // minimal allocation slop and then pushes them to the queue.  This is
+  // useful for decoders working with formats that give awkward numbers of
+  // frames such as mp3.
+  AudioCompactor mAudioCompactor;
+
   // Reference to the owning decoder object.
   AbstractMediaDecoder* mDecoder;
 
   // Stores presentation info required for playback.
   MediaInfo mInfo;
 
   // Whether we should accept media that we know we can't play
   // directly, because they have a number of channel higher than
   // what we support.
   bool mIgnoreAudioOutputFormat;
+
+private:
+
+  nsRefPtr<RequestSampleCallback> mSampleDecodedCallback;
+
+  nsRefPtr<MediaTaskQueue> mTaskQueue;
+
+  // Flags whether the next audio/video sample comes after a "gap" or
+  // "discontinuity" in the stream. For example after a seek.
+  bool mAudioDiscontinuity;
+  bool mVideoDiscontinuity;
+};
+
+// Interface that callers to MediaDecoderReader::Request{Audio,Video}Data()
+// must implement to receive the requested samples asynchronously.
+// This object is refcounted, and cycles must be broken by calling
+// BreakCycles() during shutdown.
+class RequestSampleCallback {
+public:
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RequestSampleCallback)
+
+  // Receives the result of a RequestAudioData() call.
+  virtual void OnAudioDecoded(AudioData* aSample) = 0;
+
+  // Called when a RequestAudioData() call can't be fulfilled as we've
+  // reached the end of stream.
+  virtual void OnAudioEOS() = 0;
+
+  // Receives the result of a RequestVideoData() call.
+  virtual void OnVideoDecoded(VideoData* aSample) = 0;
+
+  // Called when a RequestVideoData() call can't be fulfilled as we've
+  // reached the end of stream.
+  virtual void OnVideoEOS() = 0;
+
+  // Called when there's a decode error. No more sample requests
+  // will succeed.
+  virtual void OnDecodeError() = 0;
+
+  // Called during shutdown to break any reference cycles.
+  virtual void BreakCycles() = 0;
+
+  virtual ~RequestSampleCallback() {}
+};
+
+// A RequestSampleCallback implementation that can be passed to the
+// MediaDecoderReader to block the thread requesting an audio sample until
+// the audio decode is complete. This is used to adapt the asynchronous
+// model of the MediaDecoderReader to a synchronous model.
+class AudioDecodeRendezvous : public RequestSampleCallback {
+public:
+  AudioDecodeRendezvous();
+  ~AudioDecodeRendezvous();
+
+  // RequestSampleCallback implementation. Called when decode is complete.
+  // Note: aSample is null at end of stream.
+  virtual void OnAudioDecoded(AudioData* aSample) MOZ_OVERRIDE;
+  virtual void OnAudioEOS() MOZ_OVERRIDE;
+  virtual void OnVideoDecoded(VideoData* aSample) MOZ_OVERRIDE {}
+  virtual void OnVideoEOS() MOZ_OVERRIDE {}
+  virtual void OnDecodeError() MOZ_OVERRIDE;
+  virtual void BreakCycles() MOZ_OVERRIDE {};
+  void Reset();
+
+  // Returns failure on error, or NS_OK.
+  // If *aSample is null, EOS has been reached.
+  nsresult Await(nsAutoPtr<AudioData>& aSample);
+
+  // Interrupts a call to Await().
+  void Cancel();
+
+private:
+  Monitor mMonitor;
+  nsresult mStatus;
+  nsAutoPtr<AudioData> mSample;
+  bool mHaveResult;
 };
 
 } // namespace mozilla
 
 #endif
--- a/content/media/MediaDecoderStateMachine.cpp
+++ b/content/media/MediaDecoderStateMachine.cpp
@@ -52,19 +52,26 @@ extern PRLogModuleInfo* gMediaDecoderLog
 #define DECODER_LOG(type, msg, ...) \
   PR_LOG(gMediaDecoderLog, type, ("Decoder=%p " msg, mDecoder.get(), ##__VA_ARGS__))
 #define VERBOSE_LOG(msg, ...)                          \
     PR_BEGIN_MACRO                                     \
       if (!PR_GetEnv("MOZ_QUIET")) {                   \
         DECODER_LOG(PR_LOG_DEBUG, msg, ##__VA_ARGS__); \
       }                                                \
     PR_END_MACRO
+#define SAMPLE_LOG(msg, ...)                          \
+    PR_BEGIN_MACRO                                     \
+      if (PR_GetEnv("MEDIA_LOG_SAMPLES")) {            \
+        DECODER_LOG(PR_LOG_DEBUG, msg, ##__VA_ARGS__); \
+      }                                                \
+    PR_END_MACRO
 #else
 #define DECODER_LOG(type, msg, ...)
 #define VERBOSE_LOG(msg, ...)
+#define SAMPLE_LOG(msg, ...)
 #endif
 
 // GetCurrentTime is defined in winbase.h as zero argument macro forwarding to
 // GetTickCount() and conflicts with MediaDecoderStateMachine::GetCurrentTime
 // implementation.  With unified builds, putting this in headers is not enough.
 #ifdef GetCurrentTime
 #undef GetCurrentTime
 #endif
@@ -187,32 +194,35 @@ MediaDecoderStateMachine::MediaDecoderSt
   mVideoFrameEndTime(-1),
   mVolume(1.0),
   mPlaybackRate(1.0),
   mPreservesPitch(true),
   mBasePosition(0),
   mAmpleVideoFrames(2),
   mLowAudioThresholdUsecs(LOW_AUDIO_USECS),
   mAmpleAudioThresholdUsecs(AMPLE_AUDIO_USECS),
-  mDispatchedAudioDecodeTask(false),
-  mDispatchedVideoDecodeTask(false),
+  mAudioRequestPending(false),
+  mVideoRequestPending(false),
   mAudioCaptured(false),
   mTransportSeekable(true),
   mMediaSeekable(true),
   mPositionChangeQueued(false),
   mAudioCompleted(false),
   mGotDurationFromMetaData(false),
   mDispatchedEventToDecode(false),
   mStopAudioThread(true),
   mQuickBuffering(false),
   mMinimizePreroll(false),
   mDecodeThreadWaiting(false),
   mRealTime(aRealTime),
   mDispatchedDecodeMetadataTask(false),
-  mDispatchedDecodeSeekTask(false),
+  mDropAudioUntilNextDiscontinuity(false),
+  mDropVideoUntilNextDiscontinuity(false),
+  mDecodeToSeekTarget(false),
+  mCurrentTimeBeforeSeek(0),
   mLastFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED),
   mTimerId(0)
 {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
 
   // Only enable realtime mode when "media.realtime_decoder.enabled" is true.
   if (Preferences::GetBool("media.realtime_decoder.enabled", false) == false)
@@ -553,171 +563,506 @@ bool MediaDecoderStateMachine::HaveEnoug
 }
 
 bool
 MediaDecoderStateMachine::NeedToDecodeVideo()
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
-  return mIsVideoDecoding &&
-         !mMinimizePreroll &&
-         !HaveEnoughDecodedVideo();
+  return IsVideoDecoding() &&
+         ((mState == DECODER_STATE_SEEKING && mDecodeToSeekTarget) ||
+          (!mMinimizePreroll && !HaveEnoughDecodedVideo()));
 }
 
 void
 MediaDecoderStateMachine::DecodeVideo()
 {
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-
-  if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
-    mDispatchedVideoDecodeTask = false;
-    return;
-  }
-
-  // We don't want to consider skipping to the next keyframe if we've
-  // only just started up the decode loop, so wait until we've decoded
-  // some frames before enabling the keyframe skip logic on video.
-  if (mIsVideoPrerolling &&
-      (static_cast<uint32_t>(VideoQueue().GetSize())
-        >= mVideoPrerollFrames * mPlaybackRate))
-  {
-    mIsVideoPrerolling = false;
-  }
-
-  // We'll skip the video decode to the nearest keyframe if we're low on
-  // audio, or if we're low on video, provided we're not running low on
-  // data to decode. If we're running low on downloaded data to decode,
-  // we won't start keyframe skipping, as we'll be pausing playback to buffer
-  // soon anyway and we'll want to be able to display frames immediately
-  // after buffering finishes.
-  if (mState == DECODER_STATE_DECODING &&
-      !mSkipToNextKeyFrame &&
-      mIsVideoDecoding &&
-      ((!mIsAudioPrerolling && mIsAudioDecoding &&
-        GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) ||
-        (!mIsVideoPrerolling && mIsVideoDecoding &&
-         // don't skip frame when |clock time| <= |mVideoFrameEndTime| for
-         // we are still in the safe range without underrunning video frames
-         GetClock() > mVideoFrameEndTime &&
-        (static_cast<uint32_t>(VideoQueue().GetSize())
-          < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
-      !HasLowUndecodedData())
+  int64_t currentTime = 0;
+  bool skipToNextKeyFrame = false;
   {
-    mSkipToNextKeyFrame = true;
-    DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe");
-  }
-
-  // Time the video decode, so that if it's slow, we can increase our low
-  // audio threshold to reduce the chance of an audio underrun while we're
-  // waiting for a video decode to complete.
-  TimeDuration decodeTime;
-  {
-    int64_t currentTime = GetMediaTime();
-    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-    TimeStamp start = TimeStamp::Now();
-    mIsVideoDecoding = mReader->DecodeVideoFrame(mSkipToNextKeyFrame, currentTime);
-    decodeTime = TimeStamp::Now() - start;
-  }
-  if (!mIsVideoDecoding) {
-    // Playback ended for this stream, close the sample queue.
-    VideoQueue().Finish();
-    CheckIfDecodeComplete();
+    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+    NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
+
+    if (mState != DECODER_STATE_DECODING &&
+        mState != DECODER_STATE_BUFFERING &&
+        mState != DECODER_STATE_SEEKING) {
+      mVideoRequestPending = false;
+      DispatchDecodeTasksIfNeeded();
+      return;
+    }
+
+    // We don't want to consider skipping to the next keyframe if we've
+    // only just started up the decode loop, so wait until we've decoded
+    // some frames before enabling the keyframe skip logic on video.
+    if (mIsVideoPrerolling &&
+        (static_cast<uint32_t>(VideoQueue().GetSize())
+          >= mVideoPrerollFrames * mPlaybackRate))
+    {
+      mIsVideoPrerolling = false;
+    }
+
+    // We'll skip the video decode to the nearest keyframe if we're low on
+    // audio, or if we're low on video, provided we're not running low on
+    // data to decode. If we're running low on downloaded data to decode,
+    // we won't start keyframe skipping, as we'll be pausing playback to buffer
+    // soon anyway and we'll want to be able to display frames immediately
+    // after buffering finishes.
+    if (mState == DECODER_STATE_DECODING &&
+        mIsVideoDecoding &&
+        ((!mIsAudioPrerolling && mIsAudioDecoding &&
+          GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) ||
+          (!mIsVideoPrerolling && IsVideoDecoding() &&
+           // don't skip frame when |clock time| <= |mVideoFrameEndTime| for
+           // we are still in the safe range without underrunning video frames
+           GetClock() > mVideoFrameEndTime &&
+          (static_cast<uint32_t>(VideoQueue().GetSize())
+            < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
+        !HasLowUndecodedData())
+    {
+      skipToNextKeyFrame = true;
+      DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe");
+    }
+    currentTime = mState == DECODER_STATE_SEEKING ? 0 : GetMediaTime();
+
+    // Time the video decode, so that if it's slow, we can increase our low
+    // audio threshold to reduce the chance of an audio underrun while we're
+    // waiting for a video decode to complete.
+    mVideoDecodeStartTime = TimeStamp::Now();
   }
 
-  if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
-      !HasLowUndecodedData())
-  {
-    mLowAudioThresholdUsecs =
-      std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
-    mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs,
-                                          mAmpleAudioThresholdUsecs);
-    DECODER_LOG(PR_LOG_DEBUG, "Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld",
-                mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs);
-  }
-
-  SendStreamData();
-
-  // The ready state can change when we've decoded data, so update the
-  // ready state, so that DOM events can fire.
-  UpdateReadyState();
-
-  mDispatchedVideoDecodeTask = false;
-  DispatchDecodeTasksIfNeeded();
+  mReader->RequestVideoData(skipToNextKeyFrame, currentTime);
 }
 
 bool
 MediaDecoderStateMachine::NeedToDecodeAudio()
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
-  return mIsAudioDecoding &&
-         !mMinimizePreroll &&
-         !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate);
+  return IsAudioDecoding() &&
+         ((mState == DECODER_STATE_SEEKING && mDecodeToSeekTarget) ||
+          (!mMinimizePreroll &&
+          !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate) &&
+          (mState != DECODER_STATE_SEEKING || mDecodeToSeekTarget)));
 }
 
 void
 MediaDecoderStateMachine::DecodeAudio()
 {
+  {
+    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+    NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
+
+    if (mState != DECODER_STATE_DECODING &&
+        mState != DECODER_STATE_BUFFERING &&
+        mState != DECODER_STATE_SEEKING) {
+      mAudioRequestPending = false;
+      DispatchDecodeTasksIfNeeded();
+      mon.NotifyAll();
+      return;
+    }
+
+    // We don't want to consider skipping to the next keyframe if we've
+    // only just started up the decode loop, so wait until we've decoded
+    // some audio data before enabling the keyframe skip logic on audio.
+    if (mIsAudioPrerolling &&
+        GetDecodedAudioDuration() >= mAudioPrerollUsecs * mPlaybackRate) {
+      mIsAudioPrerolling = false;
+    }
+  }
+  mReader->RequestAudioData();
+}
+
+bool
+MediaDecoderStateMachine::IsAudioSeekComplete()
+{
+  AssertCurrentThreadInMonitor();
+  SAMPLE_LOG("IsAudioSeekComplete() curTarVal=%d mAudDis=%d aqFin=%d aqSz=%d",
+    mCurrentSeekTarget.IsValid(), mDropAudioUntilNextDiscontinuity, AudioQueue().IsFinished(), AudioQueue().GetSize());
+  return
+    !HasAudio() ||
+    (mCurrentSeekTarget.IsValid() &&
+     !mDropAudioUntilNextDiscontinuity &&
+     (AudioQueue().IsFinished() || AudioQueue().GetSize() > 0));
+}
+
+bool
+MediaDecoderStateMachine::IsVideoSeekComplete()
+{
+  AssertCurrentThreadInMonitor();
+  SAMPLE_LOG("IsVideoSeekComplete() curTarVal=%d mVidDis=%d vqFin=%d vqSz=%d",
+    mCurrentSeekTarget.IsValid(), mDropVideoUntilNextDiscontinuity, VideoQueue().IsFinished(), VideoQueue().GetSize());
+  return
+    !HasVideo() ||
+    (mCurrentSeekTarget.IsValid() &&
+     !mDropVideoUntilNextDiscontinuity &&
+     (VideoQueue().IsFinished() || VideoQueue().GetSize() > 0));
+}
+
+void
+MediaDecoderStateMachine::OnAudioEOS()
+{
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
-  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-
-  if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
-    mDispatchedAudioDecodeTask = false;
+  SAMPLE_LOG("OnAudioEOS");
+  mAudioRequestPending = false;
+  AudioQueue().Finish();
+  switch (mState) {
+    case DECODER_STATE_DECODING_METADATA: {
+      MaybeFinishDecodeMetadata();
+      return;
+    }
+    case DECODER_STATE_BUFFERING:
+    case DECODER_STATE_DECODING: {
+      CheckIfDecodeComplete();
+      SendStreamData();
+      // The ready state can change when we've decoded data, so update the
+      // ready state, so that DOM events can fire.
+      UpdateReadyState();
+      mDecoder->GetReentrantMonitor().NotifyAll();
+      return;
+    }
+
+    case DECODER_STATE_SEEKING: {
+      if (!mCurrentSeekTarget.IsValid()) {
+        // We've received an EOS from a previous decode. Discard it.
+        return;
+      }
+      mDropAudioUntilNextDiscontinuity = false;
+      CheckIfSeekComplete();
+      return;
+    }
+    default: {
+      // Ignore other cases.
+      return;
+    }
+  }
+}
+
+void
+MediaDecoderStateMachine::OnAudioDecoded(AudioData* aAudioSample)
+{
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  nsAutoPtr<AudioData> audio(aAudioSample);
+  MOZ_ASSERT(audio);
+  mAudioRequestPending = false;
+
+  SAMPLE_LOG("OnAudioDecoded [%lld,%lld] disc=%d",
+             (audio ? audio->mTime : -1),
+             (audio ? audio->GetEndTime() : -1),
+             (audio ? audio->mDiscontinuity : 0));
+
+  switch (mState) {
+    case DECODER_STATE_DECODING_METADATA: {
+      Push(audio.forget());
+      MaybeFinishDecodeMetadata();
+      return;
+    }
+
+    case DECODER_STATE_BUFFERING:
+    case DECODER_STATE_DECODING: {
+      // In buffering and decoding state, we simply enqueue samples.
+      Push(audio.forget());
+      return;
+    }
+
+    case DECODER_STATE_SEEKING: {
+      if (!mCurrentSeekTarget.IsValid()) {
+        // We've received a sample from a previous decode. Discard it.
+        return;
+      }
+      if (audio->mDiscontinuity) {
+        mDropAudioUntilNextDiscontinuity = false;
+      }
+      if (!mDropAudioUntilNextDiscontinuity) {
+        // We must be after the discontinuity; we're receiving samples
+        // at or after the seek target.
+        if (mCurrentSeekTarget.mType == SeekTarget::PrevSyncPoint &&
+            mCurrentSeekTarget.mTime > mCurrentTimeBeforeSeek &&
+            audio->mTime < mCurrentTimeBeforeSeek) {
+          // We are doing a fastSeek, but we ended up *before* the previous
+          // playback position. This is surprising UX, so switch to an accurate
+          // seek and decode to the seek target. This is not conformant to the
+          // spec, fastSeek should always be fast, but until we get the time to
+          // change all Readers to seek to the keyframe after the currentTime
+          // in this case, we'll just decode forward. Bug 1026330.
+          mCurrentSeekTarget.mType = SeekTarget::Accurate;
+        }
+        if (mCurrentSeekTarget.mType == SeekTarget::PrevSyncPoint) {
+          // Non-precise seek; we can stop the seek at the first sample.
+          AudioQueue().Push(audio.forget());
+        } else {
+          // We're doing an accurate seek. We must discard
+          // MediaData up to the one containing exact seek target.
+          if (NS_FAILED(DropAudioUpToSeekTarget(audio.forget()))) {
+            DecodeError();
+            return;
+          }
+        }
+      }
+      CheckIfSeekComplete();
+      return;
+    }
+    default: {
+      // Ignore other cases.
+      return;
+    }
+  }
+}
+
+void
+MediaDecoderStateMachine::Push(AudioData* aSample)
+{
+  MOZ_ASSERT(aSample);
+  // TODO: Send aSample to MSG and recalculate readystate before pushing,
+  // otherwise AdvanceFrame may pop the sample before we have a chance
+  // to reach playing.
+  AudioQueue().Push(aSample);
+  if (mState > DECODER_STATE_DECODING_METADATA) {
+    SendStreamData();
+    // The ready state can change when we've decoded data, so update the
+    // ready state, so that DOM events can fire.
+    UpdateReadyState();
+    DispatchDecodeTasksIfNeeded();
+    mDecoder->GetReentrantMonitor().NotifyAll();
+  }
+}
+
+void
+MediaDecoderStateMachine::Push(VideoData* aSample)
+{
+  MOZ_ASSERT(aSample);
+  // TODO: Send aSample to MSG and recalculate readystate before pushing,
+  // otherwise AdvanceFrame may pop the sample before we have a chance
+  // to reach playing.
+  VideoQueue().Push(aSample);
+  if (mState > DECODER_STATE_DECODING_METADATA) {
+    SendStreamData();
+    // The ready state can change when we've decoded data, so update the
+    // ready state, so that DOM events can fire.
+    UpdateReadyState();
+    DispatchDecodeTasksIfNeeded();
+    mDecoder->GetReentrantMonitor().NotifyAll();
+  }
+}
+
+void
+MediaDecoderStateMachine::OnDecodeError()
+{
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  DecodeError();
+}
+
+void
+MediaDecoderStateMachine::MaybeFinishDecodeMetadata()
+{
+  AssertCurrentThreadInMonitor();
+  if ((IsAudioDecoding() && AudioQueue().GetSize() == 0) ||
+      (IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
     return;
   }
-
-  // We don't want to consider skipping to the next keyframe if we've
-  // only just started up the decode loop, so wait until we've decoded
-  // some audio data before enabling the keyframe skip logic on audio.
-  if (mIsAudioPrerolling &&
-      GetDecodedAudioDuration() >= mAudioPrerollUsecs * mPlaybackRate) {
-    mIsAudioPrerolling = false;
+  if (NS_FAILED(FinishDecodeMetadata())) {
+    DecodeError();
   }
-
-  {
-    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-    mIsAudioDecoding = mReader->DecodeAudioData();
+}
+
+void
+MediaDecoderStateMachine::OnVideoEOS()
+{
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  SAMPLE_LOG("OnVideoEOS");
+  mVideoRequestPending = false;
+  switch (mState) {
+    case DECODER_STATE_DECODING_METADATA: {
+      VideoQueue().Finish();
+      MaybeFinishDecodeMetadata();
+      return;
+    }
+
+    case DECODER_STATE_BUFFERING:
+    case DECODER_STATE_DECODING: {
+      VideoQueue().Finish();
+      CheckIfDecodeComplete();
+      SendStreamData();
+      // The ready state can change when we've decoded data, so update the
+      // ready state, so that DOM events can fire.
+      UpdateReadyState();
+      mDecoder->GetReentrantMonitor().NotifyAll();
+      return;
+    }
+    case DECODER_STATE_SEEKING: {
+      if (!mCurrentSeekTarget.IsValid()) {
+        // We've received a sample from a previous decode. Discard it.
+        return;
+      }
+      // Null sample. Hit end of stream. If we have decoded a frame,
+      // insert it into the queue so that we have something to display.
+      if (mFirstVideoFrameAfterSeek) {
+        VideoQueue().Push(mFirstVideoFrameAfterSeek.forget());
+      }
+      VideoQueue().Finish();
+      mDropVideoUntilNextDiscontinuity = false;
+      CheckIfSeekComplete();
+      return;
+    }
+    default: {
+      // Ignore other cases.
+      return;
+    }
   }
-  if (!mIsAudioDecoding) {
-    // Playback ended for this stream, close the sample queue.
-    AudioQueue().Finish();
-    CheckIfDecodeComplete();
+}
+
+void
+MediaDecoderStateMachine::OnVideoDecoded(VideoData* aVideoSample)
+{
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  nsAutoPtr<VideoData> video(aVideoSample);
+  mVideoRequestPending = false;
+
+  SAMPLE_LOG("OnVideoDecoded [%lld,%lld] disc=%d",
+             (video ? video->mTime : -1),
+             (video ? video->GetEndTime() : -1),
+             (video ? video->mDiscontinuity : 0));
+
+  switch (mState) {
+    case DECODER_STATE_DECODING_METADATA: {
+      Push(video.forget());
+      MaybeFinishDecodeMetadata();
+      return;
+    }
+
+    case DECODER_STATE_BUFFERING:
+    case DECODER_STATE_DECODING: {
+      Push(video.forget());
+      // If the requested video sample was slow to arrive, increase the
+      // amount of audio we buffer to ensure that we don't run out of audio.
+      // TODO: Detect when we're truly async, and don't do this if so, as
+      // it's not necessary.
+      TimeDuration decodeTime = TimeStamp::Now() - mVideoDecodeStartTime;
+      if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
+          !HasLowUndecodedData())
+      {
+        mLowAudioThresholdUsecs =
+          std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
+        mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs,
+                                              mAmpleAudioThresholdUsecs);
+        DECODER_LOG(PR_LOG_DEBUG, "Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld",
+                    mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs);
+      }
+      return;
+    }
+    case DECODER_STATE_SEEKING: {
+      if (!mCurrentSeekTarget.IsValid()) {
+        // We've received a sample from a previous decode. Discard it.
+        return;
+      }
+      if (mDropVideoUntilNextDiscontinuity) {
+        if (video->mDiscontinuity) {
+          mDropVideoUntilNextDiscontinuity = false;
+        }
+      }
+      if (!mDropVideoUntilNextDiscontinuity) {
+        // We must be after the discontinuity; we're receiving samples
+        // at or after the seek target.
+        if (mCurrentSeekTarget.mType == SeekTarget::PrevSyncPoint &&
+            mCurrentSeekTarget.mTime > mCurrentTimeBeforeSeek &&
+            video->mTime < mCurrentTimeBeforeSeek) {
+          // We are doing a fastSeek, but we ended up *before* the previous
+          // playback position. This is surprising UX, so switch to an accurate
+          // seek and decode to the seek target. This is not conformant to the
+          // spec, fastSeek should always be fast, but until we get the time to
+          // change all Readers to seek to the keyframe after the currentTime
+          // in this case, we'll just decode forward. Bug 1026330.
+          mCurrentSeekTarget.mType = SeekTarget::Accurate;
+        }
+        if (mCurrentSeekTarget.mType == SeekTarget::PrevSyncPoint) {
+          // Non-precise seek; we can stop the seek at the first sample.
+          VideoQueue().Push(video.forget());
+        } else {
+          // We're doing an accurate seek. We still need to discard
+          // MediaData up to the one containing exact seek target.
+          if (NS_FAILED(DropVideoUpToSeekTarget(video.forget()))) {
+            DecodeError();
+            return;
+          }
+        }
+      }
+      CheckIfSeekComplete();
+      return;
+    }
+    default: {
+      // Ignore other cases.
+      return;
+    }
   }
-
-  SendStreamData();
-
-  // Notify to ensure that the AudioLoop() is not waiting, in case it was
-  // waiting for more audio to be decoded.
-  mDecoder->GetReentrantMonitor().NotifyAll();
-
-  // The ready state can change when we've decoded data, so update the
-  // ready state, so that DOM events can fire.
-  UpdateReadyState();
-
-  mDispatchedAudioDecodeTask = false;
-  DispatchDecodeTasksIfNeeded();
+}
+
+void
+MediaDecoderStateMachine::CheckIfSeekComplete()
+{
+  AssertCurrentThreadInMonitor();
+
+  const bool videoSeekComplete = IsVideoSeekComplete();
+  if (HasVideo() && !videoSeekComplete) {
+    // We haven't reached the target. Ensure we have requested another sample.
+    if (NS_FAILED(EnsureVideoDecodeTaskQueued())) {
+      NS_WARNING("Failed to request video during seek");
+      DecodeError();
+    }
+  }
+
+  const bool audioSeekComplete = IsAudioSeekComplete();
+  if (HasAudio() && !audioSeekComplete) {
+    // We haven't reached the target. Ensure we have requested another sample.
+    if (NS_FAILED(EnsureAudioDecodeTaskQueued())) {
+      NS_WARNING("Failed to request audio during seek");
+      DecodeError();
+    }
+  }
+
+  SAMPLE_LOG("CheckIfSeekComplete() audioSeekComplete=%d videoSeekComplete=%d",
+             audioSeekComplete, videoSeekComplete);
+
+  if (audioSeekComplete && videoSeekComplete) {
+    mDecodeToSeekTarget = false;
+    RefPtr<nsIRunnable> task(
+      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SeekCompleted));
+    nsresult rv = mDecodeTaskQueue->Dispatch(task);
+    if (NS_FAILED(rv)) {
+      DecodeError();
+    }
+  }
+}
+
+bool
+MediaDecoderStateMachine::IsAudioDecoding()
+{
+  AssertCurrentThreadInMonitor();
+  return HasAudio() && !AudioQueue().IsFinished();
+}
+
+bool
+MediaDecoderStateMachine::IsVideoDecoding()
+{
+  AssertCurrentThreadInMonitor();
+  return HasVideo() && !VideoQueue().IsFinished();
 }
 
 void
 MediaDecoderStateMachine::CheckIfDecodeComplete()
 {
   AssertCurrentThreadInMonitor();
   if (mState == DECODER_STATE_SHUTDOWN ||
       mState == DECODER_STATE_SEEKING ||
       mState == DECODER_STATE_COMPLETED) {
     // Don't change our state if we've already been shutdown, or we're seeking,
     // since we don't want to abort the shutdown or seek processes.
     return;
   }
-  MOZ_ASSERT(!AudioQueue().IsFinished() || !mIsAudioDecoding);
-  MOZ_ASSERT(!VideoQueue().IsFinished() || !mIsVideoDecoding);
-  if (!mIsVideoDecoding && !mIsAudioDecoding) {
+  if (!IsVideoDecoding() && !IsAudioDecoding()) {
     // We've finished decoding all active streams,
     // so move to COMPLETED state.
     mState = DECODER_STATE_COMPLETED;
     DispatchDecodeTasksIfNeeded();
     ScheduleStateMachine();
   }
   DECODER_LOG(PR_LOG_DEBUG, "CheckIfDecodeComplete %scompleted",
               ((mState == DECODER_STATE_COMPLETED) ? "" : "NOT "));
@@ -1010,19 +1355,17 @@ uint32_t MediaDecoderStateMachine::PlayF
   }
   return frames;
 }
 
 nsresult MediaDecoderStateMachine::Init(MediaDecoderStateMachine* aCloneDonor)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
-  RefPtr<SharedThreadPool> decodePool(
-    SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"),
-                          Preferences::GetUint("media.num-decode-threads", 25)));
+  RefPtr<SharedThreadPool> decodePool(GetMediaDecodeThreadPool());
   NS_ENSURE_TRUE(decodePool, NS_ERROR_FAILURE);
 
   RefPtr<SharedThreadPool> stateMachinePool(
     SharedThreadPool::Get(NS_LITERAL_CSTRING("Media State Machine"), 1));
   NS_ENSURE_TRUE(stateMachinePool, NS_ERROR_FAILURE);
 
   mDecodeTaskQueue = new MediaTaskQueue(decodePool.forget());
   NS_ENSURE_TRUE(mDecodeTaskQueue, NS_ERROR_FAILURE);
@@ -1035,17 +1378,27 @@ nsresult MediaDecoderStateMachine::Init(
   mStateMachineThreadPool = stateMachinePool;
 
   nsresult rv;
   mTimer = do_CreateInstance("@mozilla.org/timer;1", &rv);
   NS_ENSURE_SUCCESS(rv, rv);
   rv = mTimer->SetTarget(GetStateMachineThread());
   NS_ENSURE_SUCCESS(rv, rv);
 
-  return mReader->Init(cloneReader);
+  // Note: This creates a cycle, broken in shutdown.
+  mMediaDecodedListener =
+    new MediaDataDecodedListener<MediaDecoderStateMachine>(this,
+                                                           mDecodeTaskQueue);
+  mReader->SetCallback(mMediaDecodedListener);
+  mReader->SetTaskQueue(mDecodeTaskQueue);
+
+  rv = mReader->Init(cloneReader);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  return NS_OK;
 }
 
 void MediaDecoderStateMachine::StopPlayback()
 {
   DECODER_LOG(PR_LOG_DEBUG, "StopPlayback()");
 
   AssertCurrentThreadInMonitor();
 
@@ -1097,20 +1450,22 @@ void MediaDecoderStateMachine::StartPlay
   mPlayStartTime = TimeStamp::Now();
 
   NS_ASSERTION(IsPlaying(), "Should report playing by end of StartPlayback()");
   if (NS_FAILED(StartAudioThread())) {
     NS_WARNING("Failed to create audio thread");
   }
   mDecoder->GetReentrantMonitor().NotifyAll();
   mDecoder->UpdateStreamBlockingForStateMachinePlaying();
+  DispatchDecodeTasksIfNeeded();
 }
 
 void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime)
 {
+  SAMPLE_LOG("UpdatePlaybackPositionInternal(%lld) (mStartTime=%lld)", aTime, mStartTime);
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine thread.");
   AssertCurrentThreadInMonitor();
 
   NS_ASSERTION(mStartTime >= 0, "Should have positive mStartTime");
   mCurrentFrameTime = aTime - mStartTime;
   NS_ASSERTION(mCurrentFrameTime >= 0, "CurrentTime should be positive!");
   if (aTime > mEndTime) {
@@ -1313,28 +1668,22 @@ void MediaDecoderStateMachine::StartDeco
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   if (mState == DECODER_STATE_DECODING) {
     return;
   }
   mState = DECODER_STATE_DECODING;
 
   mDecodeStartTime = TimeStamp::Now();
 
-  // Reset our "stream finished decoding" flags, so we try to decode all
-  // streams that we have when we start decoding.
-  mIsVideoDecoding = HasVideo() && !VideoQueue().IsFinished();
-  mIsAudioDecoding = HasAudio() && !AudioQueue().IsFinished();
-
   CheckIfDecodeComplete();
   if (mState == DECODER_STATE_COMPLETED) {
     return;
   }
 
   // Reset other state to pristine values before starting decode.
-  mSkipToNextKeyFrame = false;
   mIsAudioPrerolling = true;
   mIsVideoPrerolling = true;
 
   // Ensure that we've got tasks enqueued to decode data if we need to.
   DispatchDecodeTasksIfNeeded();
 
   ScheduleStateMachine();
 }
@@ -1375,21 +1724,26 @@ void MediaDecoderStateMachine::Play()
   // Once we start playing, we don't want to minimize our prerolling, as we
   // assume the user is likely to want to keep playing in future.
   mMinimizePreroll = false;
   ScheduleStateMachine();
 }
 
 void MediaDecoderStateMachine::ResetPlayback()
 {
-  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
+  MOZ_ASSERT(mState == DECODER_STATE_SEEKING || mState == DECODER_STATE_SHUTDOWN);
   mVideoFrameEndTime = -1;
   mAudioStartTime = -1;
   mAudioEndTime = -1;
   mAudioCompleted = false;
+  AudioQueue().Reset();
+  VideoQueue().Reset();
+  mFirstVideoFrameAfterSeek = nullptr;
+  mDropAudioUntilNextDiscontinuity = true;
+  mDropVideoUntilNextDiscontinuity = true;
 }
 
 void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer,
                                                      uint32_t aLength,
                                                      int64_t aOffset)
 {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
   mReader->NotifyDataArrived(aBuffer, aLength, aOffset);
@@ -1479,18 +1833,19 @@ nsresult
 MediaDecoderStateMachine::EnqueueDecodeMetadataTask()
 {
   AssertCurrentThreadInMonitor();
 
   if (mState != DECODER_STATE_DECODING_METADATA ||
       mDispatchedDecodeMetadataTask) {
     return NS_OK;
   }
-  nsresult rv = mDecodeTaskQueue->Dispatch(
+  RefPtr<nsIRunnable> task(
     NS_NewRunnableMethod(this, &MediaDecoderStateMachine::CallDecodeMetadata));
+  nsresult rv = mDecodeTaskQueue->Dispatch(task);
   if (NS_SUCCEEDED(rv)) {
     mDispatchedDecodeMetadataTask = true;
   } else {
     NS_WARNING("Dispatch ReadMetadata task failed.");
     return rv;
   }
 
   return NS_OK;
@@ -1511,16 +1866,22 @@ MediaDecoderStateMachine::SetReaderIdle(
   mReader->SetIdle();
 }
 
 void
 MediaDecoderStateMachine::DispatchDecodeTasksIfNeeded()
 {
   AssertCurrentThreadInMonitor();
 
+  if (mState != DECODER_STATE_DECODING &&
+      mState != DECODER_STATE_BUFFERING &&
+      mState != DECODER_STATE_SEEKING) {
+    return;
+  }
+
   // NeedToDecodeAudio() can go from false to true while we hold the
   // monitor, but it can't go from true to false. This can happen because
   // NeedToDecodeAudio() takes into account the amount of decoded audio
   // that's been written to the AudioStream but not played yet. So if we
   // were calling NeedToDecodeAudio() twice and we thread-context switch
   // between the calls, audio can play, which can affect the return value
   // of NeedToDecodeAudio() giving inconsistent results. So we cache the
   // value returned by NeedToDecodeAudio(), and make decisions
@@ -1544,16 +1905,21 @@ MediaDecoderStateMachine::DispatchDecode
 
   if (needToDecodeAudio) {
     EnsureAudioDecodeTaskQueued();
   }
   if (needToDecodeVideo) {
     EnsureVideoDecodeTaskQueued();
   }
 
+  SAMPLE_LOG("DispatchDecodeTasksIfNeeded needAudio=%d dispAudio=%d needVideo=%d dispVid=%d needIdle=%d",
+             needToDecodeAudio, mAudioRequestPending,
+             needToDecodeVideo, mVideoRequestPending,
+             needIdle);
+
   if (needIdle) {
     RefPtr<nsIRunnable> event = NS_NewRunnableMethod(
         this, &MediaDecoderStateMachine::SetReaderIdle);
     nsresult rv = mDecodeTaskQueue->Dispatch(event.forget());
     if (NS_FAILED(rv) && mState != DECODER_STATE_SHUTDOWN) {
       NS_WARNING("Failed to dispatch event to set decoder idle state");
     }
   }
@@ -1562,25 +1928,32 @@ MediaDecoderStateMachine::DispatchDecode
 nsresult
 MediaDecoderStateMachine::EnqueueDecodeSeekTask()
 {
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
   AssertCurrentThreadInMonitor();
 
   if (mState != DECODER_STATE_SEEKING ||
-      mDispatchedDecodeSeekTask) {
+      !mSeekTarget.IsValid() ||
+      mCurrentSeekTarget.IsValid()) {
     return NS_OK;
   }
-  nsresult rv = mDecodeTaskQueue->Dispatch(
+  mCurrentSeekTarget = mSeekTarget;
+  mSeekTarget.Reset();
+  mDropAudioUntilNextDiscontinuity = HasAudio();
+  mDropVideoUntilNextDiscontinuity = HasVideo();
+
+  RefPtr<nsIRunnable> task(
     NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeSeek));
-  if (NS_SUCCEEDED(rv)) {
-    mDispatchedDecodeSeekTask = true;
-  } else {
+  nsresult rv = mDecodeTaskQueue->Dispatch(task);
+  if (NS_FAILED(rv)) {
     NS_WARNING("Dispatch DecodeSeek task failed.");
+    mCurrentSeekTarget.Reset();
+    DecodeError();
   }
   return rv;
 }
 
 nsresult
 MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded()
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
@@ -1592,31 +1965,35 @@ MediaDecoderStateMachine::DispatchAudioD
   }
 
   return NS_OK;
 }
 
 nsresult
 MediaDecoderStateMachine::EnsureAudioDecodeTaskQueued()
 {
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  AssertCurrentThreadInMonitor();
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
 
+  SAMPLE_LOG("EnsureAudioDecodeTaskQueued isDecoding=%d dispatched=%d",
+              IsAudioDecoding(), mAudioRequestPending);
+
   if (mState >= DECODER_STATE_COMPLETED) {
     return NS_OK;
   }
 
   MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA);
 
-  if (mIsAudioDecoding && !mDispatchedAudioDecodeTask) {
-    nsresult rv = mDecodeTaskQueue->Dispatch(
+  if (IsAudioDecoding() && !mAudioRequestPending) {
+    RefPtr<nsIRunnable> task(
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeAudio));
+    nsresult rv = mDecodeTaskQueue->Dispatch(task);
     if (NS_SUCCEEDED(rv)) {
-      mDispatchedAudioDecodeTask = true;
+      mAudioRequestPending = true;
     } else {
       NS_WARNING("Failed to dispatch task to decode audio");
     }
   }
 
   return NS_OK;
 }
 
@@ -1632,31 +2009,36 @@ MediaDecoderStateMachine::DispatchVideoD
   }
 
   return NS_OK;
 }
 
 nsresult
 MediaDecoderStateMachine::EnsureVideoDecodeTaskQueued()
 {
-  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+  AssertCurrentThreadInMonitor();
+
+  SAMPLE_LOG("EnsureVideoDecodeTaskQueued isDecoding=%d dispatched=%d",
+             IsVideoDecoding(), mVideoRequestPending);
+
   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
                "Should be on state machine or decode thread.");
 
   if (mState >= DECODER_STATE_COMPLETED) {
     return NS_OK;
   }
 
   MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA);
 
-  if (mIsVideoDecoding && !mDispatchedVideoDecodeTask) {
-    nsresult rv = mDecodeTaskQueue->Dispatch(
+  if (IsVideoDecoding() && !mVideoRequestPending) {
+    RefPtr<nsIRunnable> task(
       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeVideo));
+    nsresult rv = mDecodeTaskQueue->Dispatch(task);
     if (NS_SUCCEEDED(rv)) {
-      mDispatchedVideoDecodeTask = true;
+      mVideoRequestPending = true;
     } else {
       NS_WARNING("Failed to dispatch task to decode video");
     }
   }
 
   return NS_OK;
 }
 
@@ -1703,22 +2085,18 @@ int64_t MediaDecoderStateMachine::AudioD
 
 bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
 {
   AssertCurrentThreadInMonitor();
   // We consider ourselves low on decoded data if we're low on audio,
   // provided we've not decoded to the end of the audio stream, or
   // if we're low on video frames, provided
   // we've not decoded to the end of the video stream.
-  return ((HasAudio() &&
-           !AudioQueue().IsFinished() &&
-           AudioDecodedUsecs() < aAudioUsecs)
-          ||
-         (HasVideo() &&
-          !VideoQueue().IsFinished() &&
+  return ((IsAudioDecoding() && AudioDecodedUsecs() < aAudioUsecs) ||
+         (IsVideoDecoding() &&
           static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
 }
 
 bool MediaDecoderStateMachine::HasLowUndecodedData()
 {
   return HasLowUndecodedData(mLowDataThresholdUsecs);
 }
 
@@ -1747,20 +2125,25 @@ bool MediaDecoderStateMachine::HasLowUnd
 }
 
 void
 MediaDecoderStateMachine::DecodeError()
 {
   AssertCurrentThreadInMonitor();
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
 
+  if (mState == DECODER_STATE_SHUTDOWN) {
+    // Already shutdown.
+    return;
+  }
+
   // Change state to shutdown before sending error report to MediaDecoder
   // and the HTMLMediaElement, so that our pipeline can start exiting
   // cleanly during the sync dispatch below.
-  DECODER_LOG(PR_LOG_WARNING, "Decode error, changed state to SHUTDOWN");
+  DECODER_LOG(PR_LOG_WARNING, "Decode error, changed state to SHUTDOWN due to error");
   ScheduleStateMachine();
   mState = DECODER_STATE_SHUTDOWN;
   mDecoder->GetReentrantMonitor().NotifyAll();
 
   // Dispatch the event to call DecodeError synchronously. This ensures
   // we're in shutdown state by the time we exit the decode thread.
   // If we just moved to shutdown state here on the decode thread, we may
   // cause the state machine to shutdown/free memory without closing its
@@ -1794,47 +2177,98 @@ nsresult MediaDecoderStateMachine::Decod
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   DECODER_LOG(PR_LOG_DEBUG, "Decoding Media Headers");
   if (mState != DECODER_STATE_DECODING_METADATA) {
     return NS_ERROR_FAILURE;
   }
 
   nsresult res;
   MediaInfo info;
-  MetadataTags* tags;
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-    res = mReader->ReadMetadata(&info, &tags);
+    res = mReader->ReadMetadata(&info, getter_Transfers(mMetadataTags));
   }
-  if (NS_SUCCEEDED(res) &&
-      mState == DECODER_STATE_DECODING_METADATA &&
-      mReader->IsWaitingMediaResources()) {
-    // change state to DECODER_STATE_WAIT_FOR_RESOURCES
-    StartWaitForResources();
-    return NS_OK;
+  if (NS_SUCCEEDED(res)) {
+    if (mState == DECODER_STATE_DECODING_METADATA &&
+        mReader->IsWaitingMediaResources()) {
+      // change state to DECODER_STATE_WAIT_FOR_RESOURCES
+      StartWaitForResources();
+      return NS_OK;
+    }
   }
 
   mInfo = info;
 
   if (NS_FAILED(res) || (!info.HasValidMedia())) {
     return NS_ERROR_FAILURE;
   }
   mDecoder->StartProgressUpdates();
   mGotDurationFromMetaData = (GetDuration() != -1);
 
-  VideoData* videoData = FindStartTime();
-  if (videoData) {
-    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-    RenderVideoFrame(videoData, TimeStamp::Now());
+  if (HasAudio()) {
+    RefPtr<nsIRunnable> decodeTask(
+      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
+    AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
+  }
+  if (HasVideo()) {
+    RefPtr<nsIRunnable> decodeTask(
+      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
+    VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
   }
 
+  if (mRealTime) {
+    SetStartTime(0);
+    res = FinishDecodeMetadata();
+    NS_ENSURE_SUCCESS(res, res);
+  } else {
+    if (HasAudio()) {
+      ReentrantMonitorAutoExit unlock(mDecoder->GetReentrantMonitor());
+      mReader->RequestAudioData();
+    }
+    if (HasVideo()) {
+      ReentrantMonitorAutoExit unlock(mDecoder->GetReentrantMonitor());
+      mReader->RequestVideoData(false, 0);
+    }
+  }
+
+  return NS_OK;
+}
+
+nsresult
+MediaDecoderStateMachine::FinishDecodeMetadata()
+{
+  AssertCurrentThreadInMonitor();
+  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
+  DECODER_LOG(PR_LOG_DEBUG, "Decoding Media Headers");
+
   if (mState == DECODER_STATE_SHUTDOWN) {
     return NS_ERROR_FAILURE;
   }
 
+  if (!mRealTime) {
+
+    const VideoData* v = VideoQueue().PeekFront();
+    const AudioData* a = AudioQueue().PeekFront();
+
+    int64_t startTime = std::min<int64_t>(a ? a->mTime : INT64_MAX,
+                                          v ? v->mTime : INT64_MAX);
+    if (startTime == INT64_MAX) {
+      startTime = 0;
+    }
+    DECODER_LOG(PR_LOG_DEBUG, "DecodeMetadata first video frame start %lld",
+                              v ? v->mTime : -1);
+    DECODER_LOG(PR_LOG_DEBUG, "DecodeMetadata first audio frame start %lld",
+                              a ? a->mTime : -1);
+    SetStartTime(startTime);
+    if (VideoQueue().GetSize()) {
+      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+      RenderVideoFrame(VideoQueue().PeekFront(), TimeStamp::Now());
+    }
+  }
+
   NS_ASSERTION(mStartTime != -1, "Must have start time");
   MOZ_ASSERT((!HasVideo() && !HasAudio()) ||
               !(mMediaSeekable && mTransportSeekable) || mEndTime != -1,
               "Active seekable media should have end time");
   MOZ_ASSERT(!(mMediaSeekable && mTransportSeekable) ||
              GetDuration() != -1, "Seekable media should have duration");
   DECODER_LOG(PR_LOG_DEBUG, "Media goes from %lld to %lld (duration %lld) "
               "transportSeekable=%d, mediaSeekable=%d",
@@ -1850,36 +2284,25 @@ nsresult MediaDecoderStateMachine::Decod
 
   // Inform the element that we've loaded the metadata and the first frame.
   nsCOMPtr<nsIRunnable> metadataLoadedEvent =
     new AudioMetadataEventRunner(mDecoder,
                                  mInfo.mAudio.mChannels,
                                  mInfo.mAudio.mRate,
                                  HasAudio(),
                                  HasVideo(),
-                                 tags);
-  NS_DispatchToMainThread(metadataLoadedEvent);
-
-  if (HasAudio()) {
-    RefPtr<nsIRunnable> decodeTask(
-      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
-    AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
-  }
-  if (HasVideo()) {
-    RefPtr<nsIRunnable> decodeTask(
-      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
-    VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
-  }
+                                 mMetadataTags.forget());
+  NS_DispatchToMainThread(metadataLoadedEvent, NS_DISPATCH_NORMAL);
 
   if (mState == DECODER_STATE_DECODING_METADATA) {
     DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING_METADATA to DECODING");
     StartDecoding();
   }
 
-  // For very short media FindStartTime() can decode the entire media.
+  // For very short media the metadata decode can decode the entire media.
   // So we need to check if this has occurred, else our decode pipeline won't
   // run (since it doesn't need to) and we won't detect end of stream.
   CheckIfDecodeComplete();
 
   if ((mState == DECODER_STATE_DECODING || mState == DECODER_STATE_COMPLETED) &&
       mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
       !IsPlaying())
   {
@@ -1888,35 +2311,34 @@ nsresult MediaDecoderStateMachine::Decod
 
   return NS_OK;
 }
 
 void MediaDecoderStateMachine::DecodeSeek()
 {
   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-  AutoSetOnScopeExit<bool> unsetOnExit(mDispatchedDecodeSeekTask, false);
   if (mState != DECODER_STATE_SEEKING) {
     return;
   }
 
   // During the seek, don't have a lock on the decoder state,
   // otherwise long seek operations can block the main thread.
   // The events dispatched to the main thread are SYNC calls.
   // These calls are made outside of the decode monitor lock so
   // it is safe for the main thread to makes calls that acquire
   // the lock since it won't deadlock. We check the state when
   // acquiring the lock again in case shutdown has occurred
   // during the time when we didn't have the lock.
-  int64_t seekTime = mSeekTarget.mTime;
+  int64_t seekTime = mCurrentSeekTarget.mTime;
   mDecoder->StopProgressUpdates();
 
   bool currentTimeChanged = false;
-  const int64_t mediaTime = GetMediaTime();
-  if (mediaTime != seekTime) {
+  mCurrentTimeBeforeSeek = GetMediaTime();
+  if (mCurrentTimeBeforeSeek != seekTime) {
     currentTimeChanged = true;
     // Stop playback now to ensure that while we're outside the monitor
     // dispatching SeekingStarted, playback doesn't advance and mess with
     // mCurrentFrameTime that we've setting to seekTime here.
     StopPlayback();
     UpdatePlaybackPositionInternal(seekTime);
   }
 
@@ -1930,92 +2352,104 @@ void MediaDecoderStateMachine::DecodeSee
   // inform the element and its users that we have no frames
   // to display
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
     nsCOMPtr<nsIRunnable> startEvent =
       NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStarted);
     NS_DispatchToMainThread(startEvent, NS_DISPATCH_SYNC);
   }
-
-  int64_t newCurrentTime = seekTime;
-  if (currentTimeChanged) {
+  if (mState != DECODER_STATE_SEEKING) {
+    // May have shutdown while we released the monitor.
+    return;
+  }
+
+  if (!currentTimeChanged) {
+    DECODER_LOG(PR_LOG_DEBUG, "Seek !currentTimeChanged...");
+    mDecodeToSeekTarget = false;
+    nsresult rv = mDecodeTaskQueue->Dispatch(
+      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SeekCompleted));
+    if (NS_FAILED(rv)) {
+      DecodeError();
+    }
+  } else {
     // The seek target is different than the current playback position,
     // we'll need to seek the playback position, so shutdown our decode
     // and audio threads.
     StopAudioThread();
     ResetPlayback();
+
     nsresult res;
     {
       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-      // Now perform the seek. We must not hold the state machine monitor
-      // while we seek, since the seek reads, which could block on I/O.
-      res = mReader->Seek(seekTime,
-                          mStartTime,
-                          mEndTime,
-                          mediaTime);
-
-      if (NS_SUCCEEDED(res) && mSeekTarget.mType == SeekTarget::Accurate) {
-        res = mReader->DecodeToTarget(seekTime);
+      // We must not hold the state machine monitor while we call into
+      // the reader, since it could do I/O or deadlock some other way.
+      res = mReader->ResetDecode();
+      if (NS_SUCCEEDED(res)) {
+        res = mReader->Seek(seekTime,
+                            mStartTime,
+                            mEndTime,
+                            mCurrentTimeBeforeSeek);
       }
     }
-
-    if (NS_SUCCEEDED(res)) {
-      int64_t nextSampleStartTime = 0;
-      VideoData* video = nullptr;
+    if (NS_FAILED(res)) {
+      DecodeError();
+      return;
+    }
+
+    // We must decode the first samples of active streams, so we can determine
+    // the new stream time. So dispatch tasks to do that.
+    mDecodeToSeekTarget = true;
+    DispatchDecodeTasksIfNeeded();
+  }
+}
+
+void
+MediaDecoderStateMachine::SeekCompleted()
+{
+  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
+
+  // We must reset the seek target when exiting this function, but not
+  // before, as if we dropped the monitor in any function called here,
+  // we may begin a new seek on the state machine thread, and be in
+  // an inconsistent state.
+  AutoSetOnScopeExit<SeekTarget> reset(mCurrentSeekTarget, SeekTarget());
+
+  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
+  if (mState != DECODER_STATE_SEEKING) {
+    return;
+  }
+
+  int64_t seekTime = mCurrentSeekTarget.mTime;
+  int64_t newCurrentTime = mCurrentSeekTarget.mTime;
+
+  // Setup timestamp state.
+  VideoData* video = VideoQueue().PeekFront();
+  if (seekTime == mEndTime) {
+    newCurrentTime = mAudioStartTime = seekTime;
+  } else if (HasAudio()) {
+    AudioData* audio = AudioQueue().PeekFront();
+    newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
+  } else {
+    newCurrentTime = video ? video->mTime : seekTime;
+  }
+  mPlayDuration = newCurrentTime - mStartTime;
+
+  if (HasVideo()) {
+    if (video) {
       {
         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-        video = mReader->FindStartTime(nextSampleStartTime);
-      }
-
-      if (seekTime > mediaTime &&
-          nextSampleStartTime < mediaTime &&
-          mSeekTarget.mType == SeekTarget::PrevSyncPoint) {
-        // We are doing a fastSeek, but we ended up *before* the previous
-        // playback position. This is surprising UX, so switch to an accurate
-        // seek and decode to the seek target. This is not conformant to the
-        // spec, fastSeek should always be fast, but until we get the time to
-        // change all Readers to seek to the keyframe after the currentTime
-        // in this case, we'll just decode forward. Bug 1026330.
-        ResetPlayback();
-        {
-          ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-          res = mReader->DecodeToTarget(seekTime);
-          if (NS_SUCCEEDED(res)) {
-            video = mReader->FindStartTime(nextSampleStartTime);
-          }
-        }
+        RenderVideoFrame(video, TimeStamp::Now());
       }
-
-      // Setup timestamp state.
-      if (seekTime == mEndTime) {
-        newCurrentTime = mAudioStartTime = seekTime;
-      } else if (HasAudio()) {
-        AudioData* audio = AudioQueue().PeekFront();
-        newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
-      } else {
-        newCurrentTime = video ? video->mTime : seekTime;
-      }
-      mPlayDuration = newCurrentTime - mStartTime;
-
-      if (HasVideo()) {
-        if (video) {
-          {
-            ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-            RenderVideoFrame(video, TimeStamp::Now());
-          }
-          nsCOMPtr<nsIRunnable> event =
-            NS_NewRunnableMethod(mDecoder, &MediaDecoder::Invalidate);
-          NS_DispatchToMainThread(event);
-        }
-      }
-    } else {
-      DecodeError();
+      nsCOMPtr<nsIRunnable> event =
+        NS_NewRunnableMethod(mDecoder, &MediaDecoder::Invalidate);
+      NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
     }
   }
+
   mDecoder->StartProgressUpdates();
   if (mState == DECODER_STATE_DECODING_METADATA ||
       mState == DECODER_STATE_DORMANT ||
       mState == DECODER_STATE_SHUTDOWN) {
     return;
   }
 
   // Change state to DECODING or COMPLETED now. SeekingStopped will
@@ -2028,30 +2462,27 @@ void MediaDecoderStateMachine::DecodeSee
     // Seeked to end of media, move to COMPLETED state. Note we don't do
     // this if we're playing a live stream, since the end of media will advance
     // once we download more data!
     DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to COMPLETED", seekTime);
     stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStoppedAtEnd);
     // Explicitly set our state so we don't decode further, and so
     // we report playback ended to the media element.
     mState = DECODER_STATE_COMPLETED;
-    mIsAudioDecoding = false;
-    mIsVideoDecoding = false;
     DispatchDecodeTasksIfNeeded();
   } else {
     DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to DECODING", seekTime);
     stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStopped);
     StartDecoding();
   }
 
-  if (newCurrentTime != mediaTime) {
-    UpdatePlaybackPositionInternal(newCurrentTime);
-    if (mDecoder->GetDecodedStream()) {
-      SetSyncPointForMediaStream();
-    }
+  // Ensure timestamps are up to date.
+  UpdatePlaybackPositionInternal(newCurrentTime);
+  if (mDecoder->GetDecodedStream()) {
+    SetSyncPointForMediaStream();
   }
 
   // Try to decode another frame to detect if we're at the end...
   DECODER_LOG(PR_LOG_DEBUG, "Seek completed, mCurrentFrameTime=%lld", mCurrentFrameTime);
 
   {
     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
     NS_DispatchToMainThread(stopEvent, NS_DISPATCH_SYNC);
@@ -2068,18 +2499,20 @@ void MediaDecoderStateMachine::DecodeSee
 // Runnable to dispose of the decoder and state machine on the main thread.
 class nsDecoderDisposeEvent : public nsRunnable {
 public:
   nsDecoderDisposeEvent(already_AddRefed<MediaDecoder> aDecoder,
                         already_AddRefed<MediaDecoderStateMachine> aStateMachine)
     : mDecoder(aDecoder), mStateMachine(aStateMachine) {}
   NS_IMETHOD Run() {
     NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
-    mStateMachine->ReleaseDecoder();
-    mDecoder->ReleaseStateMachine();
+    MOZ_ASSERT(mStateMachine);
+    MOZ_ASSERT(mDecoder);
+    mStateMachine->BreakCycles();
+    mDecoder->BreakCycles();
     mStateMachine = nullptr;
     mDecoder = nullptr;
     return NS_OK;
   }
 private:
   nsRefPtr<MediaDecoder> mDecoder;
   nsRefPtr<MediaDecoderStateMachine> mStateMachine;
 };
@@ -2110,39 +2543,68 @@ nsresult MediaDecoderStateMachine::RunSt
   MediaResource* resource = mDecoder->GetResource();
   NS_ENSURE_TRUE(resource, NS_ERROR_NULL_POINTER);
 
   switch (mState) {
     case DECODER_STATE_SHUTDOWN: {
       if (IsPlaying()) {
         StopPlayback();
       }
+
+      // Put a task in the decode queue to abort any decoding operations.
+      // The reader is not supposed to put any tasks to deliver samples into
+      // the queue after we call this (unless we request another sample from it).
+      RefPtr<nsIRunnable> task;
+      task = NS_NewRunnableMethod(mReader, &MediaDecoderReader::ResetDecode);
+      mDecodeTaskQueue->Dispatch(task);
+
+      {
+        // Wait for the thread decoding to abort decoding operations and run
+        // any pending callbacks. This is important, as we don't want any
+        // pending tasks posted to the task queue by the reader to deliver
+        // any samples after we've posted the reader Shutdown() task below,
+        // as the sample-delivery tasks will keep video frames alive until
+        // after we've called Reader::Shutdown(), and shutdown on B2G will
+        // fail as there are outstanding video frames alive.
+        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+        mDecodeTaskQueue->Flush();
+      }
+
+      // We must reset playback so that all references to frames queued
+      // in the state machine are dropped, else the Shutdown() call below
+      // can fail on B2G.
+      ResetPlayback();
+
+      // Put a task in the decode queue to shutdown the reader.
+      task = NS_NewRunnableMethod(mReader, &MediaDecoderReader::Shutdown);
+      mDecodeTaskQueue->Dispatch(task);
+
       StopAudioThread();
       // If mAudioThread is non-null after StopAudioThread completes, we are
       // running in a nested event loop waiting for Shutdown() on
       // mAudioThread to complete.  Return to the event loop and let it
       // finish processing before continuing with shutdown.
       if (mAudioThread) {
         MOZ_ASSERT(mStopAudioThread);
         return NS_OK;
       }
 
+      {
+        // Wait for the thread decoding to exit.
+        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
+        mDecodeTaskQueue->Shutdown();
+        mDecodeTaskQueue = nullptr;
+      }
+
       // The reader's listeners hold references to the state machine,
       // creating a cycle which keeps the state machine and its shared
       // thread pools alive. So break it here.
       AudioQueue().ClearListeners();
       VideoQueue().ClearListeners();
 
-      {
-        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-        // Wait for the thread decoding to exit.
-        mDecodeTaskQueue->Shutdown();
-        mDecodeTaskQueue = nullptr;
-        mReader->ReleaseMediaResources();
-      }
       // Now that those threads are stopped, there's no possibility of
       // mPendingWakeDecoder being needed again. Revoke it.
       mPendingWakeDecoder = nullptr;
 
       MOZ_ASSERT(mState == DECODER_STATE_SHUTDOWN,
                  "How did we escape from the shutdown state?");
       // We must daisy-chain these events to destroy the decoder. We must
       // destroy the decoder on the main thread, but we can't destroy the
@@ -2249,18 +2711,17 @@ nsresult MediaDecoderStateMachine::RunSt
       {
         StartPlayback();
       }
       NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled");
       return NS_OK;
     }
 
     case DECODER_STATE_SEEKING: {
-      // Ensure we have a decode thread to perform the seek.
-     return EnqueueDecodeSeekTask();
+      return EnqueueDecodeSeekTask();
     }
 
     case DECODER_STATE_COMPLETED: {
       // Play the remaining media. We want to run AdvanceFrame() at least
       // once to ensure the current playback position is advanced to the
       // end of the media, and so that we update the readyState.
       if (VideoQueue().GetSize() > 0 ||
           (HasAudio() && !mAudioCompleted) ||
@@ -2549,44 +3010,140 @@ void MediaDecoderStateMachine::Wait(int6
     int64_t ms = static_cast<int64_t>(NS_round((end - now).ToSeconds() * 1000));
     if (ms == 0 || ms > UINT32_MAX) {
       break;
     }
     mDecoder->GetReentrantMonitor().Wait(PR_MillisecondsToInterval(static_cast<uint32_t>(ms)));
   }
 }
 
-VideoData* MediaDecoderStateMachine::FindStartTime()
+nsresult
+MediaDecoderStateMachine::DropVideoUpToSeekTarget(VideoData* aSample)
+{
+  nsAutoPtr<VideoData> video(aSample);
+
+  const int64_t target = mCurrentSeekTarget.mTime;
+
+  // If the frame's end time is at or before the seek target, we won't want
+  // to display this frame after the seek, so discard it.
+  if (target >= video->GetEndTime()) {
+    DECODER_LOG(PR_LOG_DEBUG,
+                "DropVideoUpToSeekTarget() pop video frame [%lld, %lld] target=%lld",
+                video->mTime, video->GetEndTime(), target);
+    mFirstVideoFrameAfterSeek = video;
+  } else {
+    if (target >= video->mTime && video->GetEndTime() >= target) {
+      // The seek target lies inside this frame's time slice. Adjust the frame's
+      // start time to match the seek target. We do this by replacing the
+      // first frame with a shallow copy which has the new timestamp.
+      VideoData* temp = VideoData::ShallowCopyUpdateTimestamp(video, target);
+      video = temp;
+    }
+    mFirstVideoFrameAfterSeek = nullptr;
+
+    DECODER_LOG(PR_LOG_DEBUG,
+                "DropVideoUpToSeekTarget() found video frame [%lld, %lld] containing target=%lld",
+                video->mTime, video->GetEndTime(), target);
+
+    VideoQueue().PushFront(video.forget());
+
+  }
+  return NS_OK;
+}
+
+nsresult
+MediaDecoderStateMachine::DropAudioUpToSeekTarget(AudioData* aSample)
+{
+  nsAutoPtr<AudioData> audio(aSample);
+  MOZ_ASSERT(audio &&
+             mCurrentSeekTarget.IsValid() &&
+             mCurrentSeekTarget.mType == SeekTarget::Accurate);
+
+  CheckedInt64 startFrame = UsecsToFrames(audio->mTime,
+                                          mInfo.mAudio.mRate);
+  CheckedInt64 targetFrame = UsecsToFrames(mCurrentSeekTarget.mTime,
+                                           mInfo.mAudio.mRate);
+  if (!startFrame.isValid() || !targetFrame.isValid()) {
+    return NS_ERROR_FAILURE;
+  }
+  if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
+    // Our seek target lies after the frames in this AudioData. Don't
+    // push it onto the audio queue, and keep decoding forwards.
+    return NS_OK;
+  }
+  if (startFrame.value() > targetFrame.value()) {
+    // The seek target doesn't lie in the audio block just after the last
+    // audio frames we've seen which were before the seek target. This
+    // could have been the first audio data we've seen after seek, i.e. the
+    // seek terminated after the seek target in the audio stream. Just
+    // abort the audio decode-to-target, the state machine will play
+    // silence to cover the gap. Typically this happens in poorly muxed
+    // files.
+    NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
+    AudioQueue().Push(audio.forget());
+    return NS_OK;
+  }
+
+  // The seek target lies somewhere in this AudioData's frames, strip off
+  // any frames which lie before the seek target, so we'll begin playback
+  // exactly at the seek target.
+  NS_ASSERTION(targetFrame.value() >= startFrame.value(),
+               "Target must at or be after data start.");
+  NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
+               "Data must end after target.");
+
+  int64_t framesToPrune = targetFrame.value() - startFrame.value();
+  if (framesToPrune > audio->mFrames) {
+    // We've messed up somehow. Don't try to trim frames, the |frames|
+    // variable below will overflow.
+    NS_WARNING("Can't prune more frames that we have!");
+    return NS_ERROR_FAILURE;
+  }
+  uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
+  uint32_t channels = audio->mChannels;
+  nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
+  memcpy(audioData.get(),
+         audio->mAudioData.get() + (framesToPrune * channels),
+         frames * channels * sizeof(AudioDataValue));
+  CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
+  if (!duration.isValid()) {
+    return NS_ERROR_FAILURE;
+  }
+  nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
+                                          mCurrentSeekTarget.mTime,
+                                          duration.value(),
+                                          frames,
+                                          audioData.forget(),
+                                          channels));
+  AudioQueue().PushFront(data.forget());
+
+  return NS_OK;
+}
+
+void MediaDecoderStateMachine::SetStartTime(int64_t aStartTimeUsecs)
 {
   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
-  AssertCurrentThreadInMonitor();
-  int64_t startTime = 0;
+  DECODER_LOG(PR_LOG_DEBUG, "SetStartTime(%lld)", aStartTimeUsecs);
   mStartTime = 0;
-  VideoData* v = nullptr;
-  {
-    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
-    v = mReader->FindStartTime(startTime);
-  }
-  if (startTime != 0) {
-    mStartTime = startTime;
+  if (aStartTimeUsecs != 0) {
+    mStartTime = aStartTimeUsecs;
     if (mGotDurationFromMetaData) {
       NS_ASSERTION(mEndTime != -1,
                    "We should have mEndTime as supplied duration here");
       // We were specified a duration from a Content-Duration HTTP header.
       // Adjust mEndTime so that mEndTime-mStartTime matches the specified
       // duration.
       mEndTime = mStartTime + mEndTime;
     }
   }
   // Set the audio start time to be start of media. If this lies before the
   // first actual audio frame we have, we'll inject silence during playback
   // to ensure the audio starts at the correct time.
   mAudioStartTime = mStartTime;
-  DECODER_LOG(PR_LOG_DEBUG, "Media start time is %lld", mStartTime);
-  return v;
+  DECODER_LOG(PR_LOG_DEBUG, "Set media start time to %lld", mStartTime);
 }
 
 void MediaDecoderStateMachine::UpdateReadyState() {
   AssertCurrentThreadInMonitor();
 
   MediaDecoderOwner::NextFrameStatus nextFrameStatus = GetNextFrameStatus();
   if (nextFrameStatus == mLastFrameStatus) {
     return;
@@ -2801,17 +3358,17 @@ void MediaDecoderStateMachine::SetPlayba
   if (mPlaybackRate == aPlaybackRate) {
     return;
   }
 
   // Get position of the last time we changed the rate.
   if (!HasAudio()) {
     // mBasePosition is a position in the video stream, not an absolute time.
     if (mState == DECODER_STATE_SEEKING) {
-      mBasePosition = mSeekTarget.mTime - mStartTime;
+      mBasePosition = mCurrentSeekTarget.mTime - mStartTime;
     } else {
       mBasePosition = GetVideoStreamPosition();
     }
     mPlayDuration = mBasePosition;
     mResetPlayStartTime = true;
     mPlayStartTime = TimeStamp::Now();
   }
 
--- a/content/media/MediaDecoderStateMachine.h
+++ b/content/media/MediaDecoderStateMachine.h
@@ -1,76 +1,82 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 /*
-Each video element for a media file has two threads:
+
+Each media element for a media file has one thread called the "audio thread".
+
+The audio thread writes the decoded audio data to the audio
+hardware. This is done in a separate thread to ensure that the
+audio hardware gets a constant stream of data without
+interruption due to decoding or display. At some point
+AudioStream will be refactored to have a callback interface
+where it asks for data and this thread will no longer be
+needed.
 
-  1) The Audio thread writes the decoded audio data to the audio
-     hardware. This is done in a separate thread to ensure that the
-     audio hardware gets a constant stream of data without
-     interruption due to decoding or display. At some point
-     AudioStream will be refactored to have a callback interface
-     where it asks for data and an extra thread will no longer be
-     needed.
+The element/state machine also has a MediaTaskQueue which runs in a
+SharedThreadPool that is shared with all other elements/decoders. The state
+machine dispatches tasks to this to call into the MediaDecoderReader to
+request decoded audio or video data. The Reader will callback with decoded
+samples when it has them available, and the state machine places the decoded
+samples into its queues for the consuming threads to pull from.
 
-  2) The decode thread. This thread reads from the media stream and
-     decodes the Theora and Vorbis data. It places the decoded data into
-     queues for the other threads to pull from.
+The MediaDecoderReader can choose to decode asynchronously, or synchronously
+and return requested samples synchronously inside its Request*Data()
+functions via callback. Asynchronous decoding is preferred, and should be
+used for any new readers.
 
-All file reads, seeks, and all decoding must occur on the decode thread.
 Synchronisation of state between the thread is done via a monitor owned
 by MediaDecoder.
 
-The lifetime of the decode and audio threads is controlled by the state
-machine when it runs on the shared state machine thread. When playback
-needs to occur they are created and events dispatched to them to run
-them. These events exit when decoding/audio playback is completed or
-no longer required.
+The lifetime of the audio thread is controlled by the state machine when
+it runs on the shared state machine thread. When playback needs to occur
+the audio thread is created and an event dispatched to run it. The audio
+thread exits when audio playback is completed or no longer required.
 
 A/V synchronisation is handled by the state machine. It examines the audio
 playback time and compares this to the next frame in the queue of video
 frames. If it is time to play the video frame it is then displayed, otherwise
 it schedules the state machine to run again at the time of the next frame.
 
 Frame skipping is done in the following ways:
 
   1) The state machine will skip all frames in the video queue whose
      display time is less than the current audio time. This ensures
      the correct frame for the current time is always displayed.
 
-  2) The decode thread will stop decoding interframes and read to the
+  2) The decode tasks will stop decoding interframes and read to the
      next keyframe if it determines that decoding the remaining
      interframes will cause playback issues. It detects this by:
        a) If the amount of audio data in the audio queue drops
           below a threshold whereby audio may start to skip.
        b) If the video queue drops below a threshold where it
           will be decoding video data that won't be displayed due
           to the decode thread dropping the frame immediately.
+     TODO: In future we should only do this when the Reader is decoding
+           synchronously.
 
 When hardware accelerated graphics is not available, YCbCr conversion
-is done on the decode thread when video frames are decoded.
+is done on the decode task queue when video frames are decoded.
 
-The decode thread pushes decoded audio and videos frames into two
+The decode task queue pushes decoded audio and videos frames into two
 separate queues - one for audio and one for video. These are kept
 separate to make it easy to constantly feed audio data to the audio
 hardware while allowing frame skipping of video data. These queues are
 threadsafe, and neither the decode, audio, or state machine should
 be able to monopolize them, and cause starvation of the other threads.
 
 Both queues are bounded by a maximum size. When this size is reached
-the decode thread will no longer decode video or audio depending on the
-queue that has reached the threshold. If both queues are full, the decode
-thread will wait on the decoder monitor.
-
-When the decode queues are full (they've reaced their maximum size) and
-the decoder is not in PLAYING play state, the state machine may opt
-to shut down the decode thread in order to conserve resources.
+the decode tasks will no longer request video or audio depending on the
+queue that has reached the threshold. If both queues are full, no more
+decode tasks will be dispatched to the decode task queue, so other
+decoders will have an opportunity to run.
 
 During playback the audio thread will be idle (via a Wait() on the
 monitor) if the audio queue is empty. Otherwise it constantly pops
 audio data off the queue and plays it with a blocking write to the audio
 hardware (via AudioStream).
 
 */
 #if !defined(MediaDecoderStateMachine_h__)
@@ -78,16 +84,17 @@ hardware (via AudioStream).
 
 #include "mozilla/Attributes.h"
 #include "nsThreadUtils.h"
 #include "MediaDecoder.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "MediaDecoderReader.h"
 #include "MediaDecoderOwner.h"
 #include "MediaMetadataManager.h"
+#include "MediaDataDecodedListener.h"
 
 class nsITimer;
 
 namespace mozilla {
 
 class AudioSegment;
 class VideoSegment;
 class MediaTaskQueue;
@@ -97,17 +104,17 @@ class SharedThreadPool;
 // GetTickCount() and conflicts with MediaDecoderStateMachine::GetCurrentTime
 // implementation.
 #ifdef GetCurrentTime
 #undef GetCurrentTime
 #endif
 
 /*
   The state machine class. This manages the decoding and seeking in the
-  MediaDecoderReader on the decode thread, and A/V sync on the shared
+  MediaDecoderReader on the decode task queue, and A/V sync on the shared
   state machine thread, and controls the audio "push" thread.
 
   All internal state is synchronised via the decoder monitor. State changes
   are either propagated by NotifyAll on the monitor (typically when state
   changes need to be propagated to non-state machine threads) or by scheduling
   the state machine to run another cycle on the shared state machine thread.
 
   See MediaDecoder.h for more details.
@@ -307,20 +314,19 @@ public:
 
   // Timer function to implement ScheduleStateMachine(aUsecs).
   nsresult TimeoutExpired(int aGeneration);
 
   // Set the media fragment end time. aEndTime is in microseconds.
   void SetFragmentEndTime(int64_t aEndTime);
 
   // Drop reference to decoder.  Only called during shutdown dance.
-  void ReleaseDecoder() {
-    MOZ_ASSERT(mReader);
+  void BreakCycles() {
     if (mReader) {
-      mReader->ReleaseDecoder();
+      mReader->BreakCycles();
     }
     mDecoder = nullptr;
   }
 
   // If we're playing into a MediaStream, record the current point in the
   // MediaStream and the current point in our media resource so later we can
   // convert MediaStream playback positions to media resource positions. Best to
   // call this while we're not playing (while the MediaStream is blocked). Can
@@ -352,21 +358,32 @@ public:
 
   // Notifies the state machine that should minimize the number of samples
   // decoded we preroll, until playback starts. The first time playback starts
   // the state machine is free to return to prerolling normally. Note
   // "prerolling" in this context refers to when we decode and buffer decoded
   // samples in advance of when they're needed for playback.
   void SetMinimizePrerollUntilPlaybackStarts();
 
+  void OnAudioDecoded(AudioData* aSample);
+  void OnAudioEOS();
+  void OnVideoDecoded(VideoData* aSample);
+  void OnVideoEOS();
+  void OnDecodeError();
+
 protected:
   virtual ~MediaDecoderStateMachine();
 
   void AssertCurrentThreadInMonitor() const { mDecoder->GetReentrantMonitor().AssertCurrentThreadIn(); }
 
+  // Inserts MediaData* samples into their respective MediaQueues.
+  // aSample must not be null.
+  void Push(AudioData* aSample);
+  void Push(VideoData* aSample);
+
   class WakeDecoderRunnable : public nsRunnable {
   public:
     WakeDecoderRunnable(MediaDecoderStateMachine* aSM)
       : mMutex("WakeDecoderRunnable"), mStateMachine(aSM) {}
     NS_IMETHOD Run() MOZ_OVERRIDE
     {
       nsRefPtr<MediaDecoderStateMachine> stateMachine;
       {
@@ -392,18 +409,24 @@ protected:
     // would mean in some cases we'd have to destroy mStateMachine from this
     // object, which would be problematic since MediaDecoderStateMachine can
     // only be destroyed on the main thread whereas this object can be destroyed
     // on the media stream graph thread.
     MediaDecoderStateMachine* mStateMachine;
   };
   WakeDecoderRunnable* GetWakeDecoderRunnable();
 
-  MediaQueue<AudioData>& AudioQueue() { return mReader->AudioQueue(); }
-  MediaQueue<VideoData>& VideoQueue() { return mReader->VideoQueue(); }
+  MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
+  MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
+
+  nsresult FinishDecodeMetadata();
+
+  RefPtr<MediaDataDecodedListener<MediaDecoderStateMachine>> mMediaDecodedListener;
+
+  nsAutoPtr<MetadataTags> mMetadataTags;
 
   // True if our buffers of decoded audio are not full, and we should
   // decode more.
   bool NeedToDecodeAudio();
 
   // Decodes some audio. This should be run on the decode task queue.
   void DecodeAudio();
 
@@ -463,21 +486,20 @@ protected:
   // so far.
   int64_t GetVideoStreamPosition();
 
   // Return the current time, either the audio clock if available (if the media
   // has audio, and the playback is possible), or a clock for the video.
   // Called on the state machine thread.
   int64_t GetClock();
 
-  // Returns the presentation time of the first audio or video frame in the
-  // media.  If the media has video, it returns the first video frame. The
-  // decoder monitor must be held with exactly one lock count. Called on the
-  // state machine thread.
-  VideoData* FindStartTime();
+  nsresult DropAudioUpToSeekTarget(AudioData* aSample);
+  nsresult DropVideoUpToSeekTarget(VideoData* aSample);
+
+  void SetStartTime(int64_t aStartTimeUsecs);
 
   // Update only the state machine's current playback position (and duration,
   // if unknown).  Does not update the playback position on the decoder or
   // media element -- use UpdatePlaybackPosition for that.  Called on the state
   // machine thread, caller must hold the decoder lock.
   void UpdatePlaybackPositionInternal(int64_t aTime);
 
   // Pushes the image down the rendering pipeline. Called on the shared state
@@ -539,16 +561,20 @@ protected:
 
   void StartWaitForResources();
 
   // Dispatches a task to the decode task queue to begin decoding metadata.
   // This is threadsafe and can be called on any thread.
   // The decoder monitor must be held.
   nsresult EnqueueDecodeMetadataTask();
 
+  // Dispatches a task to the decode task queue to seek the decoder.
+  // The decoder monitor must be held.
+  nsresult EnqueueDecodeSeekTask();
+
   nsresult DispatchAudioDecodeTaskIfNeeded();
 
   // Ensures a to decode audio has been dispatched to the decode task queue.
   // If a task to decode has already been dispatched, this does nothing,
   // otherwise this dispatches a task to do the decode.
   // This is called on the state machine or decode threads.
   // The decoder monitor must be held.
   nsresult EnsureAudioDecodeTaskQueued();
@@ -556,36 +582,26 @@ protected:
   nsresult DispatchVideoDecodeTaskIfNeeded();
 
   // Ensures a to decode video has been dispatched to the decode task queue.
   // If a task to decode has already been dispatched, this does nothing,
   // otherwise this dispatches a task to do the decode.
   // The decoder monitor must be held.
   nsresult EnsureVideoDecodeTaskQueued();
 
-  // Dispatches a task to the decode task queue to seek the decoder.
-  // The decoder monitor must be held.
-  nsresult EnqueueDecodeSeekTask();
-
   // Calls the reader's SetIdle(). This is only called in a task dispatched to
   // the decode task queue, don't call it directly.
   void SetReaderIdle();
 
   // Re-evaluates the state and determines whether we need to dispatch
   // events to run the decode, or if not whether we should set the reader
   // to idle mode. This is threadsafe, and can be called from any thread.
   // The decoder monitor must be held.
   void DispatchDecodeTasksIfNeeded();
 
-  // Queries our state to see whether the decode has finished for all streams.
-  // If so, we move into DECODER_STATE_COMPLETED and schedule the state machine
-  // to run.
-  // The decoder monitor must be held.
-  void CheckIfDecodeComplete();
-
   // Returns the "media time". This is the absolute time which the media
   // playback has reached. i.e. this returns values in the range
   // [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
   // not start at 0. Note this is different to the value returned
   // by GetCurrentTime(), which is in the range [0,duration].
   int64_t GetMediaTime() const {
     AssertCurrentThreadInMonitor();
     return mStartTime + mCurrentFrameTime;
@@ -599,25 +615,39 @@ protected:
   // hardware, so this can only be used as a upper bound. The decoder monitor
   // must be held when calling this. Called on the decode thread.
   int64_t GetDecodedAudioDuration();
 
   // Load metadata. Called on the decode thread. The decoder monitor
   // must be held with exactly one lock count.
   nsresult DecodeMetadata();
 
+  // Wraps the call to DecodeMetadata(), signals a DecodeError() on failure.
+  void CallDecodeMetadata();
+
+  // Checks whether we're finished decoding metadata, and switches to DECODING
+  // state if so.
+  void MaybeFinishDecodeMetadata();
+
   // Seeks to mSeekTarget. Called on the decode thread. The decoder monitor
   // must be held with exactly one lock count.
   void DecodeSeek();
 
-  // Decode loop, decodes data until EOF or shutdown.
-  // Called on the decode thread.
-  void DecodeLoop();
+  void CheckIfSeekComplete();
+  bool IsAudioSeekComplete();
+  bool IsVideoSeekComplete();
 
-  void CallDecodeMetadata();
+  // Completes the seek operation, moves onto the next appropriate state.
+  void SeekCompleted();
+
+  // Queries our state to see whether the decode has finished for all streams.
+  // If so, we move into DECODER_STATE_COMPLETED and schedule the state machine
+  // to run.
+  // The decoder monitor must be held.
+  void CheckIfDecodeComplete();
 
   // Copy audio from an AudioData packet to aOutput. This may require
   // inserting silence depending on the timing of the audio packet.
   void SendStreamAudio(AudioData* aAudio, DecodedStreamData* aStream,
                        AudioSegment* aOutput);
 
   // State machine thread run function. Defers to RunStateMachine().
   nsresult CallRunStateMachine();
@@ -632,27 +662,45 @@ protected:
     return !mTimeout.IsNull();
   }
 
   // Returns true if we're not playing and the decode thread has filled its
   // decode buffers and is waiting. We can shut the decode thread down in this
   // case as it may not be needed again.
   bool IsPausedAndDecoderWaiting();
 
+  // These return true if the respective stream's decode has not yet reached
+  // the end of stream.
+  bool IsAudioDecoding();
+  bool IsVideoDecoding();
+
   // The decoder object that created this state machine. The state machine
   // holds a strong reference to the decoder to ensure that the decoder stays
   // alive once media element has started the decoder shutdown process, and has
   // dropped its reference to the decoder. This enables the state machine to
   // keep using the decoder's monitor until the state machine has finished
   // shutting down, without fear of the monitor being destroyed. After
   // shutting down, the state machine will then release this reference,
   // causing the decoder to be destroyed. This is accessed on the decode,
   // state machine, audio and main threads.
   nsRefPtr<MediaDecoder> mDecoder;
 
+  // Time at which the last video sample was requested. If it takes too long
+  // before the sample arrives, we will increase the amount of audio we buffer.
+  // This is necessary for legacy synchronous decoders to prevent underruns.
+  TimeStamp mVideoDecodeStartTime;
+
+  // Queue of audio frames. This queue is threadsafe, and is accessed from
+  // the audio, decoder, state machine, and main threads.
+  MediaQueue<AudioData> mAudioQueue;
+
+  // Queue of video frames. This queue is threadsafe, and is accessed from
+  // the decoder, state machine, and main threads.
+  MediaQueue<VideoData> mVideoQueue;
+
   // The decoder monitor must be obtained before modifying this state.
   // NotifyAll on the monitor must be called when the state is changed so
   // that interested threads can wake up and alter behaviour if appropriate
   // Accessed on state machine, audio, main, and AV thread.
   State mState;
 
   // Thread for pushing audio onto the audio hardware.
   // The "audio push thread".
@@ -714,29 +762,36 @@ protected:
   // machine, decode, and main threads. Access controlled by decoder monitor.
   int64_t mEndTime;
 
   // Position to seek to in microseconds when the seek state transition occurs.
   // The decoder monitor lock must be obtained before reading or writing
   // this value. Accessed on main and decode thread.
   SeekTarget mSeekTarget;
 
+  // The position that we're currently seeking to. This differs from
+  // mSeekTarget, as mSeekTarget is the target we'll seek to next, whereas
+  // mCurrentSeekTarget is the position that the decode is in the process
+  // of seeking to.
+  // The decoder monitor lock must be obtained before reading or writing
+  // this value.
+  SeekTarget mCurrentSeekTarget;
+
   // Media Fragment end time in microseconds. Access controlled by decoder monitor.
   int64_t mFragmentEndTime;
 
   // The audio stream resource. Used on the state machine, and audio threads.
   // This is created and destroyed on the audio thread, while holding the
   // decoder monitor, so if this is used off the audio thread, you must
   // first acquire the decoder monitor and check that it is non-null.
   RefPtr<AudioStream> mAudioStream;
 
   // The reader, don't call its methods with the decoder monitor held.
-  // This is created in the play state machine's constructor, and destroyed
-  // in the play state machine's destructor.
-  nsAutoPtr<MediaDecoderReader> mReader;
+  // This is created in the state machine's constructor.
+  nsRefPtr<MediaDecoderReader> mReader;
 
   // Accessed only on the state machine thread.
   // Not an nsRevocableEventPtr since we must Revoke() it well before
   // this object is destroyed, anyway.
   // Protected by decoder monitor except during the SHUTDOWN state after the
   // decoder thread has been stopped.
   nsRevocableEventPtr<WakeDecoderRunnable> mPendingWakeDecoder;
 
@@ -812,16 +867,22 @@ protected:
   // got a few frames decoded before we consider whether decode is falling
   // behind. Otherwise our "we're falling behind" logic will trigger
   // unneccessarily if we start playing as soon as the first sample is
   // decoded. These two fields store how many video frames and audio
   // samples we must consume before are considered to be finished prerolling.
   uint32_t mAudioPrerollUsecs;
   uint32_t mVideoPrerollFrames;
 
+  // This temporarily stores the first frame we decode after we seek.
+  // This is so that if we hit end of stream while we're decoding to reach
+  // the seek target, we will still have a frame that we can display as the
+  // last frame in the media.
+  nsAutoPtr<VideoData> mFirstVideoFrameAfterSeek;
+
   // When we start decoding (either for the first time, or after a pause)
   // we may be low on decoded data. We don't want our "low data" logic to
   // kick in and decide that we're low on decoded data because the download
   // can't keep up with the decode, and cause us to pause playback. So we
   // have a "preroll" stage, where we ignore the results of our "low data"
   // logic during the first few frames of our decode. This occurs during
   // playback. The flags below are true when the corresponding stream is
   // being "prerolled".
@@ -831,29 +892,21 @@ protected:
   // True when we have an audio stream that we're decoding, and we have not
   // yet decoded to end of stream.
   bool mIsAudioDecoding;
 
   // True when we have a video stream that we're decoding, and we have not
   // yet decoded to end of stream.
   bool mIsVideoDecoding;
 
-  // True when we have dispatched a task to the decode task queue to run
-  // the audio decode.
-  bool mDispatchedAudioDecodeTask;
-
-  // True when we have dispatched a task to the decode task queue to run
-  // the video decode.
-  bool mDispatchedVideoDecodeTask;
-
-  // If the video decode is falling behind the audio, we'll start dropping the
-  // inter-frames up until the next keyframe which is at or before the current
-  // playback position. skipToNextKeyframe is true if we're currently
-  // skipping up to the next keyframe.
-  bool mSkipToNextKeyFrame;
+  // True when we have dispatched a task to the decode task queue to request
+  // decoded audio/video, and/or we are waiting for the requested sample to be
+  // returned by callback from the Reader.
+  bool mAudioRequestPending;
+  bool mVideoRequestPending;
 
   // True if we shouldn't play our audio (but still write it to any capturing
   // streams). When this is true, mStopAudioThread is always true and
   // the audio thread will never start again after it has stopped.
   bool mAudioCaptured;
 
   // True if the media resource can be seeked on a transport level. Accessed
   // from the state machine and main threads. Synchronised via decoder monitor.
@@ -919,20 +972,31 @@ protected:
   // True is we are decoding a realtime stream, like a camera stream
   bool mRealTime;
 
   // True if we've dispatched a task to the decode task queue to call
   // ReadMetadata on the reader. We maintain a flag to ensure that we don't
   // dispatch multiple tasks to re-do the metadata loading.
   bool mDispatchedDecodeMetadataTask;
 
-  // True if we've dispatched a task to the decode task queue to call
-  // Seek on the reader. We maintain a flag to ensure that we don't
-  // dispatch multiple tasks to re-do the seek.
-  bool mDispatchedDecodeSeekTask;
+  // These two flags are true when we need to drop decoded samples that
+  // we receive up to the next discontinuity. We do this when we seek;
+  // the first sample in each stream after the seek is marked as being
+  // a "discontinuity".
+  bool mDropAudioUntilNextDiscontinuity;
+  bool mDropVideoUntilNextDiscontinuity;
+
+  // True if we need to decode forwards to the seek target inside
+  // mCurrentSeekTarget.
+  bool mDecodeToSeekTarget;
+
+  // We record the playback position before we seek in order to
+  // determine where the seek terminated relative to the playback position
+  // we were at before the seek.
+  int64_t mCurrentTimeBeforeSeek;
 
   // Stores presentation info required for playback. The decoder monitor
   // must be held when accessing this.
   MediaInfo mInfo;
 
   mozilla::MediaMetadataManager mMetadataManager;
 
   MediaDecoderOwner::NextFrameStatus mLastFrameStatus;
--- a/content/media/MediaQueue.h
+++ b/content/media/MediaQueue.h
@@ -38,21 +38,23 @@ template <class T> class MediaQueue : pr
 
   inline int32_t GetSize() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     return nsDeque::GetSize();
   }
 
   inline void Push(T* aItem) {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+    MOZ_ASSERT(aItem);
     nsDeque::Push(aItem);
   }
 
   inline void PushFront(T* aItem) {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
+    MOZ_ASSERT(aItem);
     nsDeque::PushFront(aItem);
   }
 
   inline T* PopFront() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     T* rv = static_cast<T*>(nsDeque::PopFront());
     if (rv) {
       NotifyPopListeners();
@@ -70,21 +72,16 @@ template <class T> class MediaQueue : pr
     return static_cast<T*>(nsDeque::PeekFront());
   }
 
   inline void Empty() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     nsDeque::Empty();
   }
 
-  inline void Erase() {
-    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
-    nsDeque::Erase();
-  }
-
   void Reset() {
     ReentrantMonitorAutoEnter mon(mReentrantMonitor);
     while (GetSize() > 0) {
       T* x = PopFront();
       delete x;
     }
     mEndOfStream = false;
   }
--- a/content/media/StreamBuffer.h
+++ b/content/media/StreamBuffer.h
@@ -26,17 +26,17 @@ typedef int32_t TrackID;
 const TrackID TRACK_NONE = 0;
 
 inline TrackTicks RateConvertTicksRoundDown(TrackRate aOutRate,
                                             TrackRate aInRate,
                                             TrackTicks aTicks)
 {
   NS_ASSERTION(0 < aOutRate && aOutRate <= TRACK_RATE_MAX, "Bad out rate");
   NS_ASSERTION(0 < aInRate && aInRate <= TRACK_RATE_MAX, "Bad in rate");
-  NS_ASSERTION(0 <= aTicks && aTicks <= TRACK_TICKS_MAX, "Bad ticks");
+  NS_WARN_IF_FALSE(0 <= aTicks && aTicks <= TRACK_TICKS_MAX, "Bad ticks"); // bug 957691
   return (aTicks * aOutRate) / aInRate;
 }
 inline TrackTicks RateConvertTicksRoundUp(TrackRate aOutRate,
                                           TrackRate aInRate, TrackTicks aTicks)
 {
   NS_ASSERTION(0 < aOutRate && aOutRate <= TRACK_RATE_MAX, "Bad out rate");
   NS_ASSERTION(0 < aInRate && aInRate <= TRACK_RATE_MAX, "Bad in rate");
   NS_ASSERTION(0 <= aTicks && aTicks <= TRACK_TICKS_MAX, "Bad ticks");
--- a/content/media/VideoUtils.cpp
+++ b/content/media/VideoUtils.cpp
@@ -4,16 +4,18 @@
 
 #include "VideoUtils.h"
 #include "MediaResource.h"
 #include "mozilla/dom/TimeRanges.h"
 #include "nsMathUtils.h"
 #include "nsSize.h"
 #include "VorbisUtils.h"
 #include "ImageContainer.h"
+#include "SharedThreadPool.h"
+#include "mozilla/Preferences.h"
 
 #include <stdint.h>
 
 namespace mozilla {
 
 using layers::PlanarYCbCrImage;
 
 // Converts from number of audio frames to microseconds, given the specified
@@ -185,9 +187,15 @@ IsValidVideoRegion(const nsIntSize& aFra
     aPicture.width * aPicture.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
     aPicture.width * aPicture.height != 0 &&
     aDisplay.width <= PlanarYCbCrImage::MAX_DIMENSION &&
     aDisplay.height <= PlanarYCbCrImage::MAX_DIMENSION &&
     aDisplay.width * aDisplay.height <= MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
     aDisplay.width * aDisplay.height != 0;
 }
 
+TemporaryRef<SharedThreadPool> GetMediaDecodeThreadPool()
+{
+  return SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"),
+                               Preferences::GetUint("media.num-decode-threads", 25));
+}
+
 } // end namespace mozilla
--- a/content/media/VideoUtils.h
+++ b/content/media/VideoUtils.h
@@ -15,16 +15,17 @@
 #if !(defined(XP_WIN) || defined(XP_MACOSX) || defined(LINUX)) || \
     defined(MOZ_ASAN)
 // For MEDIA_THREAD_STACK_SIZE
 #include "nsIThreadManager.h"
 #endif
 #include "nsThreadUtils.h"
 #include "prtime.h"
 #include "AudioSampleFormat.h"
+#include "mozilla/RefPtr.h"
 
 using mozilla::CheckedInt64;
 using mozilla::CheckedUint64;
 using mozilla::CheckedInt32;
 using mozilla::CheckedUint32;
 
 struct nsIntSize;
 struct nsIntRect;
@@ -203,11 +204,17 @@ public:
   ~AutoSetOnScopeExit() {
     mVar = mValue;
   }
 private:
   T& mVar;
   const T mValue;
 };
 
+class SharedThreadPool;
+
+// Returns the thread pool that is shared amongst all decoder state machines
+// for decoding streams.
+TemporaryRef<SharedThreadPool> GetMediaDecodeThreadPool();
+
 } // end namespace mozilla
 
 #endif
--- a/content/media/mediasource/MediaSourceDecoder.cpp
+++ b/content/media/mediasource/MediaSourceDecoder.cpp
@@ -38,16 +38,18 @@ class TimeRanges;
 
 } // namespace dom
 
 class MediaSourceReader : public MediaDecoderReader
 {
 public:
   MediaSourceReader(MediaSourceDecoder* aDecoder, dom::MediaSource* aSource)
     : MediaDecoderReader(aDecoder)
+    , mTimeThreshold(-1)
+    , mDropVideoBeforeThreshold(false)
     , mActiveVideoDecoder(-1)
     , mActiveAudioDecoder(-1)
     , mMediaSource(aSource)
   {
   }
 
   nsresult Init(MediaDecoderReader* aCloneDonor) MOZ_OVERRIDE
   {
@@ -57,63 +59,82 @@ public:
     return NS_OK;
   }
 
   bool IsWaitingMediaResources() MOZ_OVERRIDE
   {
     return mDecoders.IsEmpty() && mPendingDecoders.IsEmpty();
   }
 
-  bool DecodeAudioData() MOZ_OVERRIDE
+  void RequestAudioData() MOZ_OVERRIDE
   {
     if (!GetAudioReader()) {
       MSE_DEBUG("%p DecodeAudioFrame called with no audio reader", this);
       MOZ_ASSERT(mPendingDecoders.IsEmpty());
-      return false;
+      GetCallback()->OnDecodeError();
+      return;
     }
-    bool rv = GetAudioReader()->DecodeAudioData();
-
-    nsAutoTArray<AudioData*, 10> audio;
-    GetAudioReader()->AudioQueue().GetElementsAfter(-1, &audio);
-    for (uint32_t i = 0; i < audio.Length(); ++i) {
-      AudioQueue().Push(audio[i]);
-    }
-    GetAudioReader()->AudioQueue().Empty();
-
-    return rv;
+    GetAudioReader()->RequestAudioData();
   }
 
-  bool DecodeVideoFrame(bool& aKeyFrameSkip, int64_t aTimeThreshold) MOZ_OVERRIDE
+  void OnAudioDecoded(AudioData* aSample)
+  {
+    GetCallback()->OnAudioDecoded(aSample);
+  }
+
+  void OnAudioEOS()
+  {
+    GetCallback()->OnAudioEOS();
+  }
+
+  void RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold) MOZ_OVERRIDE
   {
     if (!GetVideoReader()) {
       MSE_DEBUG("%p DecodeVideoFrame called with no video reader", this);
       MOZ_ASSERT(mPendingDecoders.IsEmpty());
-      return false;
+      GetCallback()->OnDecodeError();
+      return;
     }
-
-    if (MaybeSwitchVideoReaders(aTimeThreshold)) {
-      GetVideoReader()->DecodeToTarget(aTimeThreshold);
-    }
-
-    bool rv = GetVideoReader()->DecodeVideoFrame(aKeyFrameSkip, aTimeThreshold);
+    mTimeThreshold = aTimeThreshold;
+    GetVideoReader()->RequestVideoData(aSkipToNextKeyframe, aTimeThreshold);
+  }
 
-    nsAutoTArray<VideoData*, 10> video;
-    GetVideoReader()->VideoQueue().GetElementsAfter(-1, &video);
-    for (uint32_t i = 0; i < video.Length(); ++i) {
-      VideoQueue().Push(video[i]);
+  void OnVideoDecoded(VideoData* aSample)
+  {
+    if (mDropVideoBeforeThreshold) {
+      if (aSample->mTime < mTimeThreshold) {
+        delete aSample;
+        GetVideoReader()->RequestVideoData(false, mTimeThreshold);
+      } else {
+        mDropVideoBeforeThreshold = false;
+        GetCallback()->OnVideoDecoded(aSample);
+      }
+    } else {
+      GetCallback()->OnVideoDecoded(aSample);
     }
-    GetVideoReader()->VideoQueue().Empty();
+  }
 
-    if (rv) {
-      return true;
-    }
-
+  void OnVideoEOS()
+  {
+    // End of stream. See if we can switch to another video decoder.
     MSE_DEBUG("%p MSR::DecodeVF %d (%p) returned false (readers=%u)",
               this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
-    return rv;
+    if (MaybeSwitchVideoReaders()) {
+      // Success! Resume decoding with next video decoder.
+      RequestVideoData(false, mTimeThreshold);
+    } else {
+      // End of stream.
+      MSE_DEBUG("%p MSR::DecodeVF %d (%p) EOS (readers=%u)",
+                this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
+      GetCallback()->OnVideoEOS();
+    }
+  }
+
+  void OnDecodeError() {
+    GetCallback()->OnDecodeError();
   }
 
   bool HasVideo() MOZ_OVERRIDE
   {
     return mInfo.HasVideo();
   }
 
   bool HasAudio() MOZ_OVERRIDE
@@ -121,37 +142,57 @@ public:
     return mInfo.HasAudio();
   }
 
   nsresult ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags) MOZ_OVERRIDE;
   nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
                 int64_t aCurrentTime) MOZ_OVERRIDE;
   nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime) MOZ_OVERRIDE;
   already_AddRefed<SubBufferDecoder> CreateSubDecoder(const nsACString& aType,
-                                                      MediaSourceDecoder* aParentDecoder);
+                                                      MediaSourceDecoder* aParentDecoder,
+                                                      MediaTaskQueue* aTaskQueue);
+
+  void Shutdown() MOZ_OVERRIDE {
+    MediaDecoderReader::Shutdown();
+    for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
+      mDecoders[i]->GetReader()->Shutdown();
+    }
+  }
+
+  virtual void BreakCycles() MOZ_OVERRIDE {
+    MediaDecoderReader::BreakCycles();
+    for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
+      mDecoders[i]->GetReader()->BreakCycles();
+    }
+  }
 
   void InitializePendingDecoders();
 
   bool IsShutdown() {
     ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
     return mDecoder->IsShutdown();
   }
 
 private:
-  bool MaybeSwitchVideoReaders(int64_t aTimeThreshold) {
+
+  // These are read and written on the decode task queue threads.
+  int64_t mTimeThreshold;
+  bool mDropVideoBeforeThreshold;
+
+  bool MaybeSwitchVideoReaders() {
     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
     MOZ_ASSERT(mActiveVideoDecoder != -1);
 
     InitializePendingDecoders();
 
     for (uint32_t i = mActiveVideoDecoder + 1; i < mDecoders.Length(); ++i) {
       if (!mDecoders[i]->GetReader()->GetMediaInfo().HasVideo()) {
         continue;
       }
-      if (aTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
+      if (mTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
         GetVideoReader()->SetIdle();
 
         mActiveVideoDecoder = i;
         MSE_DEBUG("%p MSR::DecodeVF switching to %d", this, mActiveVideoDecoder);
 
         return true;
       }
     }
@@ -191,17 +232,17 @@ public:
   {
   }
 
   already_AddRefed<SubBufferDecoder> CreateSubDecoder(const nsACString& aType,
                                                       MediaSourceDecoder* aParentDecoder) {
     if (!mReader) {
       return nullptr;
     }
-    return static_cast<MediaSourceReader*>(mReader.get())->CreateSubDecoder(aType, aParentDecoder);
+    return static_cast<MediaSourceReader*>(mReader.get())->CreateSubDecoder(aType, aParentDecoder, mDecodeTaskQueue);
   }
 
   nsresult EnqueueDecoderInitialization() {
     AssertCurrentThreadInMonitor();
     if (!mReader) {
       return NS_ERROR_FAILURE;
     }
     return mDecodeTaskQueue->Dispatch(NS_NewRunnableMethod(this,
@@ -361,25 +402,34 @@ MediaSourceReader::InitializePendingDeco
     }
   }
   NS_DispatchToMainThread(new ReleaseDecodersTask(mPendingDecoders));
   MOZ_ASSERT(mPendingDecoders.IsEmpty());
   mDecoder->NotifyWaitingForResourcesStatusChanged();
 }
 
 already_AddRefed<SubBufferDecoder>
-MediaSourceReader::CreateSubDecoder(const nsACString& aType, MediaSourceDecoder* aParentDecoder)
+MediaSourceReader::CreateSubDecoder(const nsACString& aType,
+                                    MediaSourceDecoder* aParentDecoder,
+                                    MediaTaskQueue* aTaskQueue)
 {
   // XXX: Why/when is mDecoder null here, since it should be equal to aParentDecoder?!
   nsRefPtr<SubBufferDecoder> decoder =
     new SubBufferDecoder(new SourceBufferResource(nullptr, aType), aParentDecoder);
   nsAutoPtr<MediaDecoderReader> reader(DecoderTraits::CreateReader(aType, decoder));
   if (!reader) {
     return nullptr;
   }
+  // Set a callback on the subreader that forwards calls to this reader.
+  // This reader will then forward them onto the state machine via this
+  // reader's callback.
+  RefPtr<MediaDataDecodedListener<MediaSourceReader>> callback =
+    new MediaDataDecodedListener<MediaSourceReader>(this, aTaskQueue);
+  reader->SetCallback(callback);
+  reader->SetTaskQueue(aTaskQueue);
   reader->Init(nullptr);
   ReentrantMonitorAutoEnter mon(aParentDecoder->GetReentrantMonitor());
   MSE_DEBUG("Registered subdecoder %p subreader %p", decoder.get(), reader.get());
   decoder->SetReader(reader.forget());
   mPendingDecoders.AppendElement(decoder);
   if (NS_FAILED(static_cast<MediaSourceDecoder*>(mDecoder)->EnqueueDecoderInitialization())) {
     MSE_DEBUG("%p: Failed to enqueue decoder initialization task", this);
     return nullptr;
@@ -419,17 +469,17 @@ MediaSourceReader::Seek(int64_t aTime, i
 
   // Loop until we have the requested time range in the source buffers.
   // This is a workaround for our lack of async functionality in the
   // MediaDecoderStateMachine. Bug 979104 implements what we need and
   // we'll remove this for an async approach based on that in bug XXXXXXX.
   while (!mMediaSource->ActiveSourceBuffers()->AllContainsTime (aTime / USECS_PER_S)
          && !IsShutdown()) {
     mMediaSource->WaitForData();
-    MaybeSwitchVideoReaders(aTime);
+    MaybeSwitchVideoReaders();
   }
 
   if (IsShutdown()) {
     return NS_OK;
   }
 
   ResetDecode();
   if (GetAudioReader()) {
--- a/content/media/moz.build
+++ b/content/media/moz.build
@@ -73,16 +73,17 @@ EXPORTS += [
     'BufferMediaResource.h',
     'DecoderTraits.h',
     'DOMMediaStream.h',
     'EncodedBufferCache.h',
     'FileBlockCache.h',
     'Latency.h',
     'MediaCache.h',
     'MediaData.h',
+    'MediaDataDecodedListener.h',
     'MediaDecoder.h',
     'MediaDecoderOwner.h',
     'MediaDecoderReader.h',
     'MediaDecoderStateMachine.h',
     'MediaInfo.h',
     'MediaMetadataManager.h',
     'MediaQueue.h',
     'MediaRecorder.h',
--- a/content/media/omx/MediaOmxReader.cpp
+++ b/content/media/omx/MediaOmxReader.cpp
@@ -54,26 +54,32 @@ MediaOmxReader::MediaOmxReader(AbstractM
   }
 #endif
 
   mAudioChannel = dom::AudioChannelService::GetDefaultAudioChannel();
 }
 
 MediaOmxReader::~MediaOmxReader()
 {
-  ReleaseMediaResources();
-  ReleaseDecoder();
-  mOmxDecoder.clear();
 }
 
 nsresult MediaOmxReader::Init(MediaDecoderReader* aCloneDonor)
 {
   return NS_OK;
 }
 
+void MediaOmxReader::Shutdown()
+{
+  ReleaseMediaResources();
+  if (mOmxDecoder.get()) {
+    mOmxDecoder->ReleaseDecoder();
+  }
+  mOmxDecoder.clear();
+}
+
 bool MediaOmxReader::IsWaitingMediaResources()
 {
   if (!mOmxDecoder.get()) {
     return false;
   }
   return mOmxDecoder->IsWaitingMediaResources();
 }
 
@@ -94,23 +100,16 @@ void MediaOmxReader::ReleaseMediaResourc
   if (container) {
     container->ClearCurrentFrame();
   }
   if (mOmxDecoder.get()) {
     mOmxDecoder->ReleaseMediaResources();
   }
 }
 
-void MediaOmxReader::ReleaseDecoder()
-{
-  if (mOmxDecoder.get()) {
-    mOmxDecoder->ReleaseDecoder();
-  }
-}
-
 nsresult MediaOmxReader::InitOmxDecoder()
 {
   if (!mOmxDecoder.get()) {
     //register sniffers, if they are not registered in this process.
     DataSource::RegisterDefaultSniffers();
     mDecoder->GetResource()->SetReadMode(MediaCacheStream::MODE_METADATA);
 
     sp<DataSource> dataSource = new MediaStreamSource(mDecoder->GetResource(), mDecoder);
@@ -370,17 +369,16 @@ bool MediaOmxReader::DecodeAudioData()
                                       source.mAudioChannels));
 }
 
 nsresult MediaOmxReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   EnsureActive();
 
-  ResetDecode();
   VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
   if (container && container->GetImageContainer()) {
     container->GetImageContainer()->ClearAllImagesExceptFront();
   }
 
   if (mHasAudio && mHasVideo) {
     // The OMXDecoder seeks/demuxes audio and video streams separately. So if
     // we seek both audio and video to aTarget, the audio stream can typically
--- a/content/media/omx/MediaOmxReader.h
+++ b/content/media/omx/MediaOmxReader.h
@@ -75,24 +75,24 @@ public:
     return mHasVideo;
   }
 
   virtual bool IsWaitingMediaResources();
 
   virtual bool IsDormantNeeded();
   virtual void ReleaseMediaResources();
 
-  virtual void ReleaseDecoder() MOZ_OVERRIDE;
-
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
 
   virtual void SetIdle() MOZ_OVERRIDE;
 
+  virtual void Shutdown() MOZ_OVERRIDE;
+
   void SetAudioChannel(dom::AudioChannel aAudioChannel) {
     mAudioChannel = aAudioChannel;
   }
 
   android::sp<android::MediaSource> GetAudioOffloadTrack() {
     return mAudioOffloadTrack;
   }
 
--- a/content/media/plugins/MediaPluginReader.cpp
+++ b/content/media/plugins/MediaPluginReader.cpp
@@ -30,21 +30,16 @@ MediaPluginReader::MediaPluginReader(Abs
   mPlugin(nullptr),
   mHasAudio(false),
   mHasVideo(false),
   mVideoSeekTimeUs(-1),
   mAudioSeekTimeUs(-1)
 {
 }
 
-MediaPluginReader::~MediaPluginReader()
-{
-  ResetDecode();
-}
-
 nsresult MediaPluginReader::Init(MediaDecoderReader* aCloneDonor)
 {
   return NS_OK;
 }
 
 nsresult MediaPluginReader::ReadMetadata(MediaInfo* aInfo,
                                          MetadataTags** aTags)
 {
@@ -99,28 +94,32 @@ nsresult MediaPluginReader::ReadMetadata
     mInfo.mAudio.mRate = sampleRate;
   }
 
  *aInfo = mInfo;
  *aTags = nullptr;
   return NS_OK;
 }
 
+void MediaPluginReader::Shutdown()
+{
+  ResetDecode();
+  if (mPlugin) {
+    GetMediaPluginHost()->DestroyDecoder(mPlugin);
+    mPlugin = nullptr;
+  }
+}
+
 // Resets all state related to decoding, emptying all buffers etc.
 nsresult MediaPluginReader::ResetDecode()
 {
   if (mLastVideoFrame) {
     mLastVideoFrame = nullptr;
   }
-  if (mPlugin) {
-    GetMediaPluginHost()->DestroyDecoder(mPlugin);
-    mPlugin = nullptr;
-  }
-
-  return NS_OK;
+  return MediaDecoderReader::ResetDecode();
 }
 
 bool MediaPluginReader::DecodeVideoFrame(bool &aKeyframeSkip,
                                          int64_t aTimeThreshold)
 {
   // Record number of frames decoded and parsed. Automatically update the
   // stats counters using the AutoNotifyDecoded stack-based class.
   uint32_t parsed = 0, decoded = 0;
@@ -316,19 +315,16 @@ bool MediaPluginReader::DecodeAudioData(
                                      source.mSize,
                                      source.mAudioChannels));
 }
 
 nsresult MediaPluginReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
 
-  mVideoQueue.Reset();
-  mAudioQueue.Reset();
-
   if (mHasAudio && mHasVideo) {
     // The decoder seeks/demuxes audio and video streams separately. So if
     // we seek both audio and video to aTarget, the audio stream can typically
     // seek closer to the seek target, since typically every audio block is
     // a sync point, whereas for video there are only keyframes once every few
     // seconds. So if we have both audio and video, we must seek the video
     // stream to the preceeding keyframe first, get the stream time, and then
     // seek the audio stream to match the video stream's time. Otherwise, the
--- a/content/media/plugins/MediaPluginReader.h
+++ b/content/media/plugins/MediaPluginReader.h
@@ -38,17 +38,16 @@ class MediaPluginReader : public MediaDe
   nsIntRect mPicture;
   nsIntSize mInitialFrame;
   int64_t mVideoSeekTimeUs;
   int64_t mAudioSeekTimeUs;
   nsAutoPtr<VideoData> mLastVideoFrame;
 public:
   MediaPluginReader(AbstractMediaDecoder* aDecoder,
                     const nsACString& aContentType);
-  ~MediaPluginReader();
 
   virtual nsresult Init(MediaDecoderReader* aCloneDonor);
   virtual nsresult ResetDecode();
 
   virtual bool DecodeAudioData();
   virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
                                 int64_t aTimeThreshold);
 
@@ -61,16 +60,18 @@ public:
   {
     return mHasVideo;
   }
 
   virtual nsresult ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags);
   virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
 
+  virtual void Shutdown() MOZ_OVERRIDE;
+
   class ImageBufferCallback : public MPAPI::BufferCallback {
     typedef mozilla::layers::Image Image;
 
   public:
     ImageBufferCallback(mozilla::layers::ImageContainer *aImageContainer);
     void *operator()(size_t aWidth, size_t aHeight,
                      MPAPI::ColorFormat aColorFormat) MOZ_OVERRIDE;
     already_AddRefed<Image> GetImage();
--- a/content/media/test/manifest.js
+++ b/content/media/test/manifest.js
@@ -384,17 +384,17 @@ function IsWindows8OrLater() {
 
 // These are files that are non seekable, due to problems with the media,
 // for example broken or missing indexes.
 var gUnseekableTests = [
   { name:"no-cues.webm", type:"video/webm" },
   { name:"bogus.duh", type:"bogus/duh"}
 ];
 // Unfortunately big-buck-bunny-unseekable.mp4 is doesn't play on Windows 7, so
-// only include it in the unseekable tests if we're on later versions of Windows. 
+// only include it in the unseekable tests if we're on later versions of Windows.
 // This test actually only passes on win8 at the moment.
 if (navigator.userAgent.indexOf("Windows") != -1 && IsWindows8OrLater()) {
   gUnseekableTests = gUnseekableTests.concat([
     { name:"big-buck-bunny-unseekable.mp4", type:"video/mp4" }
   ]);
 }
 // Android supports fragmented MP4 playback from 4.3.
 var androidVersion = SpecialPowers.Cc['@mozilla.org/system-info;1']
@@ -672,30 +672,46 @@ function MediaTestManager() {
   // Registers that the test corresponding to 'token' has been started.
   // Don't call more than once per token.
   this.started = function(token) {
     this.tokens.push(token);
     this.numTestsRunning++;
     is(this.numTestsRunning, this.tokens.length, "[started " + token + "] Length of array should match number of running tests");
   }
 
+  this.watchdog = null;
+
+  this.watchdogFn = function() {
+    if (this.tokens.length > 0) {
+      info("Watchdog remaining tests= " + this.tokens);
+    }
+  }
+
   // Registers that the test corresponding to 'token' has finished. Call when
   // you've finished your test. If all tests are complete this will finish the
   // run, otherwise it may start up the next run. It's ok to call multiple times
   // per token.
   this.finished = function(token) {
     var i = this.tokens.indexOf(token);
     if (i != -1) {
       // Remove the element from the list of running tests.
       this.tokens.splice(i, 1);
     }
+
+    if (this.watchdog) {
+      clearTimeout(this.watchdog);
+      this.watchdog = null;
+    }
+
+    info("[finished " + token + "] remaining= " + this.tokens);
     this.numTestsRunning--;
     is(this.numTestsRunning, this.tokens.length, "[finished " + token + "] Length of array should match number of running tests");
     if (this.tokens.length < PARALLEL_TESTS) {
       this.nextTest();
+      this.watchdog = setTimeout(this.watchdogFn.bind(this), 10000);
     }
   }
 
   // Starts the next batch of tests, or finishes if they're all done.
   // Don't call this directly, call finished(token) when you're done.
   this.nextTest = function() {
     // Force an exact  GC after every completed testcase. This ensures that any
     // decoders with live threads waiting for the GC are killed promptly, to free
--- a/content/media/test/test_bug465498.html
+++ b/content/media/test/test_bug465498.html
@@ -9,44 +9,53 @@
 <body>
 <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=465498">Mozilla Bug 465498</a>
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
 var manager = new MediaTestManager;
 
 function startTest(e) {
+  var v = e.target;
+  info(v._name + " loadedmetadata");
   e.target.play();
 }
 
 function playbackEnded(e) {
   var v = e.target;
+  info(v._name + " ended");
   if (v._finished)
     return;
   ok(v.currentTime >= v.duration - 0.1 && v.currentTime <= v.duration + 0.1,
      "Checking currentTime at end: " + v.currentTime + " for " + v._name);
   ok(v.ended, "Checking playback has ended for " + v._name);
   v.pause();
   v.currentTime = 0;
   ok(!v.ended, "Checking ended is no longer true for " + v._name);
   v._seeked = true;
 }
 
 function seekEnded(e) {
   var v = e.target;
+  info(v._name + " seeked");
   if (v._finished)
     return;
   ok(v.currentTime == 0, "Checking currentTime after seek: " +
      v.currentTime  + " for " + v._name);
   ok(!v.ended, "Checking ended is false for " + v._name);
   v._finished = true;
   v.parentNode.removeChild(v);
   manager.finished(v.token);
 }
 
+function seeking(e) {
+  var v = e.target;
+  info(v._name + " seeking");
+}
+
 function initTest(test, token) {
   var type = getMajorMimeType(test.type);
   var v = document.createElement(type);
   if (!v.canPlayType(test.type))
     return;
   v.preload = "metadata";
   v.token = token;
   manager.started(token);
@@ -57,16 +66,17 @@ function initTest(test, token) {
   s.src = test.name;
   v.appendChild(s);
 
   v._seeked = false;
   v._finished = false;
   v.addEventListener("loadedmetadata", startTest, false);
   v.addEventListener("ended", playbackEnded, false);
   v.addEventListener("seeked", seekEnded, false);
+  v.addEventListener("seeking", seeking, false);
   document.body.appendChild(v);
 }
 
 manager.runTests(gSmallTests, initTest);
 
 </script>
 </pre>
 </body>
--- a/content/media/test/test_bug493187.html
+++ b/content/media/test/test_bug493187.html
@@ -15,34 +15,49 @@ https://bugzilla.mozilla.org/show_bug.cg
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
 SimpleTest.expectAssertions(0, 2);
 
 var manager = new MediaTestManager;
 
 function start(e) {
+  var v = e.target;
+  info("[" + v._name + "] start");
   e.target.currentTime = e.target.duration / 4;
 }
 
 function startSeeking(e) {
+  var v = e.target;
+  info("[" + v._name + "] seeking");
   e.target._seeked = true;
 }
 
 function canPlayThrough(e) {
   var v = e.target;
+  info("[" + v._name + "] canPlayThrough");
   if (v._seeked && !v._finished) {
-    ok(true, "Got canplaythrough after seek for " + v._name);
+    ok(true, "[" + v._name + "] got canplaythrough after seek");
     v._finished = true;
     v.parentNode.removeChild(v);
     v.src = "";
     manager.finished(v.token);
   }
 }
 
+function seeked(e) {
+  var v = e.target;
+  info("[" + v._name + "] seeked");
+}
+
+function error(e) {
+  var v = e.target;
+  info("[" + v._name + "] error");
+}
+
 function startTest(test, token) {
   // TODO: Bug 568402, there's a bug in the WAV backend where we sometimes
   // don't send canplaythrough events after seeking. Once that is fixed,
   // we should remove this guard below so that we run this test for audio.
   var type = getMajorMimeType(test.type);
   if (type != "video")
     return;
 
@@ -53,16 +68,18 @@ function startTest(test, token) {
   v.src = test.name;
   v._name = test.name;
   v._seeked = false;
   v._finished = false;
   v.preload = "auto";
   v.addEventListener("loadedmetadata", start, false);
   v.addEventListener("canplaythrough", canPlayThrough, false);
   v.addEventListener("seeking", startSeeking, false);
+  v.addEventListener("seeked", seeked, false);
+  v.addEventListener("error", error, false);
   document.body.appendChild(v);
 }
 
 SimpleTest.waitForExplicitFinish();
 SpecialPowers.pushPrefEnv({"set": [["media.cache_size", 40000]]}, beginTest);
 function beginTest() {
   manager.runTests(gSeekTests, startTest);
 }
--- a/content/media/test/test_seek.html
+++ b/content/media/test/test_seek.html
@@ -56,32 +56,32 @@ function createTestArray() {
       tests.push(t);
     }
   }
   return tests;
 }
 
 function startTest(test, token) {
   var v = document.createElement('video');
-  manager.started(token);
+  v.token = token += "-seek" + test.number + ".js";
+  manager.started(v.token);
   v.src = test.name;
   v.preload = "metadata";
-  v.token = token;
   document.body.appendChild(v);
   var name = test.name + " seek test " + test.number;
   var localIs = function(name) { return function(a, b, msg) {
     is(a, b, name + ": " + msg);
   }}(name);
   var localOk = function(name) { return function(a, msg) {
     ok(a, name + ": " + msg);
   }}(name);
   var localFinish = function(v, manager) { return function() {
     v.onerror = null;
     removeNodeAndSource(v);
-    dump("SEEK-TEST: Finished " + name + "\n");
+    dump("SEEK-TEST: Finished " + name + " token: " + v.token + "\n");
     manager.finished(v.token);
   }}(v, manager);
   dump("SEEK-TEST: Started " + name + "\n");
   window['test_seek' + test.number](v, test.duration/2, localIs, localOk, localFinish);
 }
 
 manager.runTests(createTestArray(), startTest);
 
--- a/content/media/webaudio/AudioContext.cpp
+++ b/content/media/webaudio/AudioContext.cpp
@@ -549,16 +549,18 @@ AudioContext::Shutdown()
 
   // We mute rather than suspending, because the delay between the ::Shutdown
   // call and the CC would make us overbuffer in the MediaStreamGraph.
   // See bug 936784 for details.
   if (!mIsOffline) {
     Mute();
   }
 
+  mDecoder.Shutdown();
+
   // Release references to active nodes.
   // Active AudioNodes don't unregister in destructors, at which point the
   // Node is already unregistered.
   mActiveNodes.Clear();
 
   // For offline contexts, we can destroy the MediaStreamGraph at this point.
   if (mIsOffline && mDestination) {
     mDestination->OfflineShutdown();
--- a/content/media/webaudio/MediaBufferDecoder.cpp
+++ b/content/media/webaudio/MediaBufferDecoder.cpp
@@ -247,22 +247,35 @@ MediaDecodeTask::Decode()
     return;
   }
 
   if (!mDecoderReader->HasAudio()) {
     ReportFailureOnMainThread(WebAudioDecodeJob::NoAudio);
     return;
   }
 
-  while (mDecoderReader->DecodeAudioData()) {
-    // consume all of the buffer
-    continue;
+  MediaQueue<AudioData> audioQueue;
+  nsRefPtr<AudioDecodeRendezvous> barrier(new AudioDecodeRendezvous());
+  mDecoderReader->SetCallback(barrier);
+  while (1) {
+    mDecoderReader->RequestAudioData();
+    nsAutoPtr<AudioData> audio;
+    if (NS_FAILED(barrier->Await(audio))) {
+      ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent);
+      return;
+    }
+    if (!audio) {
+      // End of stream.
+      break;
+    }
+    audioQueue.Push(audio.forget());
   }
+  mDecoderReader->Shutdown();
+  mDecoderReader->BreakCycles();
 
-  MediaQueue<AudioData>& audioQueue = mDecoderReader->AudioQueue();
   uint32_t frameCount = audioQueue.FrameCount();
   uint32_t channelCount = mediaInfo.mAudio.mChannels;
   uint32_t sampleRate = mediaInfo.mAudio.mRate;
 
   if (!frameCount || !channelCount || !sampleRate) {
     ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent);
     return;
   }
@@ -464,24 +477,36 @@ MediaBufferDecoder::AsyncDecodeMedia(con
     mThreadPool->Dispatch(task, nsIThreadPool::DISPATCH_NORMAL);
   }
 }
 
 bool
 MediaBufferDecoder::EnsureThreadPoolInitialized()
 {
   if (!mThreadPool) {
-    mThreadPool = SharedThreadPool::Get(NS_LITERAL_CSTRING("MediaBufferDecoder"));
+    mThreadPool = do_CreateInstance(NS_THREADPOOL_CONTRACTID);
     if (!mThreadPool) {
       return false;
     }
+    mThreadPool->SetName(NS_LITERAL_CSTRING("MediaBufferDecoder"));
   }
   return true;
 }
 
+void
+MediaBufferDecoder::Shutdown() {
+  if (mThreadPool) {
+    // Setting threadLimit to 0 causes threads to exit when all events have
+    // been run, like nsIThreadPool::Shutdown(), but doesn't run a nested event
+    // loop nor wait until this has happened.
+    mThreadPool->SetThreadLimit(0);
+    mThreadPool = nullptr;
+  }
+}
+
 WebAudioDecodeJob::WebAudioDecodeJob(const nsACString& aContentType,
                                      AudioContext* aContext,
                                      DecodeSuccessCallback* aSuccessCallback,
                                      DecodeErrorCallback* aFailureCallback)
   : mContentType(aContentType)
   , mWriteIndex(0)
   , mContext(aContext)
   , mSuccessCallback(aSuccessCallback)
--- a/content/media/webaudio/MediaBufferDecoder.h
+++ b/content/media/webaudio/MediaBufferDecoder.h
@@ -5,22 +5,21 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MediaBufferDecoder_h_
 #define MediaBufferDecoder_h_
 
 #include "nsWrapperCache.h"
 #include "nsCOMPtr.h"
 #include "nsAutoPtr.h"
-#include "SharedThreadPool.h"
+#include "nsIThreadPool.h"
 #include "nsString.h"
 #include "nsTArray.h"
 #include "mozilla/dom/TypedArray.h"
 #include "mozilla/MemoryReporting.h"
-#include "mozilla/RefPtr.h"
 
 namespace mozilla {
 
 namespace dom {
 class AudioBuffer;
 class AudioContext;
 class DecodeErrorCallback;
 class DecodeSuccessCallback;
@@ -74,24 +73,26 @@ struct WebAudioDecodeJob MOZ_FINAL
  * thread-pool) and provides a clean external interface.
  */
 class MediaBufferDecoder
 {
 public:
   void AsyncDecodeMedia(const char* aContentType, uint8_t* aBuffer,
                         uint32_t aLength, WebAudioDecodeJob& aDecodeJob);
 
+  void Shutdown();
+
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
   {
     return 0;
   }
 
 private:
   bool EnsureThreadPoolInitialized();
 
 private:
-  RefPtr<SharedThreadPool> mThreadPool;
+  nsCOMPtr<nsIThreadPool> mThreadPool;
 };
 
 }
 
 #endif
 
--- a/dom/base/nsFocusManager.cpp
+++ b/dom/base/nsFocusManager.cpp
@@ -228,17 +228,17 @@ nsFocusManager::Observe(nsISupports *aSu
   } else if (!nsCRT::strcmp(aTopic, "xpcom-shutdown")) {
     mActiveWindow = nullptr;
     mFocusedWindow = nullptr;
     mFocusedContent = nullptr;
     mFirstBlurEvent = nullptr;
     mFirstFocusEvent = nullptr;
     mWindowBeingLowered = nullptr;
     mDelayedBlurFocusEvents.Clear();
-    mMouseDownEventHandlingDocument = nullptr;
+    mMouseButtonEventHandlingDocument = nullptr;
   }
 
   return NS_OK;
 }
 
 // given a frame content node, retrieve the nsIDOMWindow displayed in it 
 static nsPIDOMWindow*
 GetContentWindow(nsIContent* aContent)
@@ -1193,20 +1193,20 @@ nsFocusManager::SetFocusInner(nsIContent
   // we need to check the permission.
   if (sendFocusEvent && mFocusedContent &&
       mFocusedContent->OwnerDoc() != aNewContent->OwnerDoc()) {
     // If the caller cannot access the current focused node, the caller should
     // not be able to steal focus from it. E.g., When the current focused node
     // is in chrome, any web contents should not be able to steal the focus.
     nsCOMPtr<nsIDOMNode> domNode(do_QueryInterface(mFocusedContent));
     sendFocusEvent = nsContentUtils::CanCallerAccess(domNode);
-    if (!sendFocusEvent && mMouseDownEventHandlingDocument) {
-      // However, while mouse down event is handling, the handling document's
+    if (!sendFocusEvent && mMouseButtonEventHandlingDocument) {
+      // However, while a mouse button event is being handled, the handling document's
       // script should be able to steal focus.
-      domNode = do_QueryInterface(mMouseDownEventHandlingDocument);
+      domNode = do_QueryInterface(mMouseButtonEventHandlingDocument);
       sendFocusEvent = nsContentUtils::CanCallerAccess(domNode);
     }
   }
 
   LOGCONTENT("Shift Focus: %s", contentToFocus.get());
   LOGFOCUS((" Flags: %x Current Window: %p New Window: %p Current Element: %p",
            aFlags, mFocusedWindow.get(), newWindow.get(), mFocusedContent.get()));
   LOGFOCUS((" In Active Window: %d In Focused Window: %d SendFocus: %d",
@@ -3463,18 +3463,18 @@ nsFocusManager::MarkUncollectableForCCGe
   if (sInstance->mFirstBlurEvent) {
     sInstance->mFirstBlurEvent->OwnerDoc()->
       MarkUncollectableForCCGeneration(aGeneration);
   }
   if (sInstance->mFirstFocusEvent) {
     sInstance->mFirstFocusEvent->OwnerDoc()->
       MarkUncollectableForCCGeneration(aGeneration);
   }
-  if (sInstance->mMouseDownEventHandlingDocument) {
-    sInstance->mMouseDownEventHandlingDocument->
+  if (sInstance->mMouseButtonEventHandlingDocument) {
+    sInstance->mMouseButtonEventHandlingDocument->
       MarkUncollectableForCCGeneration(aGeneration);
   }
 }
 
 nsresult
 NS_NewFocusManager(nsIFocusManager** aResult)
 {
   NS_IF_ADDREF(*aResult = nsFocusManager::GetFocusManager());
--- a/dom/base/nsFocusManager.h
+++ b/dom/base/nsFocusManager.h
@@ -70,23 +70,24 @@ public:
   nsPIDOMWindow* GetActiveWindow() const { return mActiveWindow; }
 
   /**
    * Called when content has been removed.
    */
   nsresult ContentRemoved(nsIDocument* aDocument, nsIContent* aContent);
 
   /**
-   * Called when mouse button down event handling is started and finished.
+   * Called when mouse button event handling is started and finished.
    */
-  void SetMouseButtonDownHandlingDocument(nsIDocument* aDocument)
+  already_AddRefed<nsIDocument>
+    SetMouseButtonHandlingDocument(nsIDocument* aDocument)
   {
-    NS_ASSERTION(!aDocument || !mMouseDownEventHandlingDocument,
-                 "Some mouse button down events are nested?");
-    mMouseDownEventHandlingDocument = aDocument;
+    nsCOMPtr<nsIDocument> handlingDocument = mMouseButtonEventHandlingDocument;
+    mMouseButtonEventHandlingDocument = aDocument;
+    return handlingDocument.forget();
   }
 
   /**
    * Update the caret with current mode (whether in caret browsing mode or not).
    */
   void UpdateCaretForCaretBrowsingMode();
 
   /**
@@ -510,23 +511,25 @@ private:
 
   // keep track of a window while it is being lowered
   nsCOMPtr<nsPIDOMWindow> mWindowBeingLowered;
 
   // synchronized actions cannot be interrupted with events, so queue these up
   // and fire them later.
   nsTArray<nsDelayedBlurOrFocusEvent> mDelayedBlurFocusEvents;
 
-  // A document which is handling a mouse button down event.
+  // A document which is handling a mouse button event.
   // When a mouse down event process is finished, ESM sets focus to the target
-  // content.  Therefore, while DOM event handlers are handling mouse down
-  // events, the handlers should be able to steal focus from any elements even
-  // if focus is in chrome content.  So, if this isn't nullptr and the caller
-  // can access the document node, the caller should succeed in moving focus.
-  nsCOMPtr<nsIDocument> mMouseDownEventHandlingDocument;
+  // content if it's not consumed.  Therefore, while DOM event handlers are
+  // handling mouse down events or preceding mouse down event is consumed but
+  // handling mouse up events, they should be able to steal focus from any
+  // elements even if focus is in chrome content.  So, if this isn't nullptr
+  // and the caller can access the document node, the caller should succeed in
+  // moving focus.
+  nsCOMPtr<nsIDocument> mMouseButtonEventHandlingDocument;
 
   static bool sTestMode;
 
   // the single focus manager
   static nsFocusManager* sInstance;
 };
 
 nsresult
new file mode 100644
--- /dev/null
+++ b/dom/base/test/iframe_bug976673.html
@@ -0,0 +1,25 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="utf-8">
+  <title>Test for Bug 976673</title>
+</head>
+<body>
+  <input id="input" onfocus="event.target.value = event.type;"
+                    onblur="event.target.value = event.type;">
+  <script>
+    var input = document.getElementById("input");
+    window.addEventListener("message", function (aEvent) {
+        switch (aEvent.data) {
+          case "init":
+            input.blur();
+            input.value = "";
+            input.focus();
+          case "check":
+            aEvent.source.postMessage("input-value: " + input.value, "*");
+            break;
+        }
+      }, false);
+  </script>
+</body>
+</html>
--- a/dom/base/test/mochitest.ini
+++ b/dom/base/test/mochitest.ini
@@ -1,24 +1,26 @@
 [DEFAULT]
 support-files =
   audio.ogg
+  iframe_bug976673.html
   iframe_messageChannel_cloning.html
   iframe_messageChannel_chrome.html
   iframe_messageChannel_pingpong.html
   iframe_messageChannel_post.html
   file_empty.html
   iframe_postMessage_solidus.html
   file_setname.html
 
 [test_appname_override.html]
 [test_audioWindowUtils.html]
 [test_audioNotification.html]
 [test_bug793311.html]
 [test_bug913761.html]
+[test_bug976673.html]
 [test_bug978522.html]
 [test_bug979109.html]
 [test_bug989665.html]
 [test_bug999456.html]
 [test_clearTimeoutIntervalNoArg.html]
 [test_consoleEmptyStack.html]
 [test_constructor-assignment.html]
 [test_constructor.html]
new file mode 100644
--- /dev/null
+++ b/dom/base/test/test_bug976673.html
@@ -0,0 +1,102 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=976673
+-->
+<head>
+  <meta charset="utf-8">
+  <title>Test for Bug 976673</title>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="application/javascript" src="/tests/SimpleTest/EventUtils.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=976673">Mozilla Bug 976673</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+
+</div>
+<pre id="test">
+</pre>
+<input id="input" onfocus="event.target.value = event.type;"
+                  onblur="event.target.value = event.type;">
+<button id="button">set focus</button>
+<iframe id="iframe" src="http://example.org:80/tests/dom/base/test/iframe_bug976673.html"></iframe>
+<script>
+
+SimpleTest.waitForExplicitFinish();
+
+window.addEventListener("mousedown", function (aEvent) { aEvent.preventDefault(); }, false);
+
+function testSetFocus(aEventType, aCallback)
+{
+  var description = "Setting focus from " + aEventType + " handler: ";
+
+  var iframe = document.getElementById("iframe");
+  iframe.contentWindow.focus();
+
+  window.addEventListener("message", function (aEvent) {
+    window.removeEventListener("message", arguments.callee, false);
+    is(aEvent.data, "input-value: focus",
+       description + "<input> in the iframe should get focus");
+
+
+    var input = document.getElementById("input");
+    input.value = "";
+
+    var button = document.getElementById("button");
+
+    var movingFocus = false;
+    button.addEventListener(aEventType,
+                            function (aEvent) {
+                              movingFocus = true;
+                              input.focus();
+                              aEvent.preventDefault();
+                              button.removeEventListener(aEventType, arguments.callee, true);
+                            }, true);
+
+    synthesizeMouseAtCenter(button, {});
+
+    window.addEventListener("message", function (aEvent) {
+      window.removeEventListener("message", arguments.callee, false);
+      if (movingFocus) {
+        is(aEvent.data, "input-value: blur",
+           description + "<input> in the iframe should get blur");
+        is(input.value, "focus",
+           description + "<input> in the parent should get focus");
+      } else {
+        is(aEvent.data, "input-value: focus",
+           description + "<input> in the iframe should keep having focus");
+      }
+
+      setTimeout(aCallback, 0);
+    }, false);
+
+    iframe.contentWindow.postMessage("check", "*");
+  }, false);
+
+  iframe.contentWindow.postMessage("init", "*");
+}
+
+function runTests()
+{
+  testSetFocus("mousedown",
+    function () {
+      testSetFocus("mouseup",
+        function () {
+          testSetFocus("click",
+            function () {
+              testSetFocus("DoNothing", // testing without moving focus by script
+                function () {
+                  SimpleTest.finish();
+                });
+            });
+        });
+    });
+}
+
+SimpleTest.waitForFocus(runTests);
+
+</script>
+</body>
+</html>
--- a/dom/base/test/test_domwindowutils.html
+++ b/dom/base/test/test_domwindowutils.html
@@ -26,34 +26,34 @@ function test_sendMouseEventDefaults() {
     is(evt.detail, clickCount, "check click count");
     is(evt.getModifierState("Shift"), true, "check modifiers");
 
     // Default value for optionals
     is(evt.mozPressure, 0, "check pressure");
     is(evt.mozInputSource, SpecialPowers.Ci.nsIDOMMouseEvent.MOZ_SOURCE_MOUSE, "check input source");
     is(evt.isSynthesized, undefined, "check isSynthesized is undefined in content");
     is(SpecialPowers.wrap(evt).isSynthesized, true, "check isSynthesized is true from chrome");
-    next();
+    SimpleTest.executeSoon(next);
   });
 
   // Only pass mandatory arguments and check default values
   utils.sendMouseEvent("mousedown", x, y, button, clickCount, modifiers);
 }
 
 function test_sendMouseEventOptionals() {
   var x = 1, y = 2, button = 1, clickCount = 3,
       modifiers = SpecialPowers.Ci.nsIDOMNSEvent.SHIFT_MASK,
       pressure = 0.5,
       source = SpecialPowers.Ci.nsIDOMMouseEvent.MOZ_SOURCE_KEYBOARD;
 
   window.addEventListener("mouseup", function listener(evt) {
     window.removeEventListener("mouseup", listener);
     is(evt.mozInputSource, source, "explicit input source is valid");
     is(SpecialPowers.wrap(evt).isSynthesized, false, "we can dispatch event that don't look synthesized");
-    next();
+    SimpleTest.executeSoon(next);
   });
 
   // Check explicit value for optional args
   utils.sendMouseEvent("mouseup", x, y, button, clickCount, modifiers,
                        false, pressure, source, false);
 }
 
 var tests = [
--- a/dom/events/EventStateManager.cpp
+++ b/dom/events/EventStateManager.cpp
@@ -5540,48 +5540,54 @@ EventStateManager::Prefs::GetAccessModif
 /******************************************************************/
 
 AutoHandlingUserInputStatePusher::AutoHandlingUserInputStatePusher(
                                     bool aIsHandlingUserInput,
                                     WidgetEvent* aEvent,
                                     nsIDocument* aDocument) :
   mIsHandlingUserInput(aIsHandlingUserInput),
   mIsMouseDown(aEvent && aEvent->message == NS_MOUSE_BUTTON_DOWN),
-  mResetFMMouseDownState(false)
+  mResetFMMouseButtonHandlingState(false)
 {
   if (!aIsHandlingUserInput) {
     return;
   }
   EventStateManager::StartHandlingUserInput();
-  if (!mIsMouseDown) {
+  if (mIsMouseDown) {
+    nsIPresShell::SetCapturingContent(nullptr, 0);
+    nsIPresShell::AllowMouseCapture(true);
+  }
+  if (!aDocument || !aEvent || !aEvent->mFlags.mIsTrusted) {
     return;
   }
-  nsIPresShell::SetCapturingContent(nullptr, 0);
-  nsIPresShell::AllowMouseCapture(true);
-  if (!aDocument || !aEvent->mFlags.mIsTrusted) {
-    return;
-  }
-  nsFocusManager* fm = nsFocusManager::GetFocusManager();
-  NS_ENSURE_TRUE_VOID(fm);
-  fm->SetMouseButtonDownHandlingDocument(aDocument);
-  mResetFMMouseDownState = true;
+  mResetFMMouseButtonHandlingState = (aEvent->message == NS_MOUSE_BUTTON_DOWN ||
+                                      aEvent->message == NS_MOUSE_BUTTON_UP);
+  if (mResetFMMouseButtonHandlingState) {
+    nsFocusManager* fm = nsFocusManager::GetFocusManager();
+    NS_ENSURE_TRUE_VOID(fm);
+    // If it's in modal state, mouse button event handling may be nested.
+    // E.g., a modal dialog is opened at mousedown or mouseup event handler
+    // and the dialog is clicked.  Therefore, we should store current
+    // mouse button event handling document if nsFocusManager already has it.
+    mMouseButtonEventHandlingDocument =
+      fm->SetMouseButtonHandlingDocument(aDocument);
+  }
 }
 
 AutoHandlingUserInputStatePusher::~AutoHandlingUserInputStatePusher()
 {
   if (!mIsHandlingUserInput) {
     return;
   }
   EventStateManager::StopHandlingUserInput();
-  if (!mIsMouseDown) {
-    return;
-  }
-  nsIPresShell::AllowMouseCapture(false);
-  if (!mResetFMMouseDownState) {
-    return;
-  }
-  nsFocusManager* fm = nsFocusManager::GetFocusManager();
-  NS_ENSURE_TRUE_VOID(fm);
-  fm->SetMouseButtonDownHandlingDocument(nullptr);
+  if (mIsMouseDown) {
+    nsIPresShell::AllowMouseCapture(false);
+  }
+  if (mResetFMMouseButtonHandlingState) {
+    nsFocusManager* fm = nsFocusManager::GetFocusManager();
+    NS_ENSURE_TRUE_VOID(fm);
+    nsCOMPtr<nsIDocument> handlingDocument =
+      fm->SetMouseButtonHandlingDocument(mMouseButtonEventHandlingDocument);
+  }
 }
 
 } // namespace mozilla
 
--- a/dom/events/EventStateManager.h
+++ b/dom/events/EventStateManager.h
@@ -907,17 +907,19 @@ public:
   AutoHandlingUserInputStatePusher(bool aIsHandlingUserInput,
                                    WidgetEvent* aEvent,
                                    nsIDocument* aDocument);
   ~AutoHandlingUserInputStatePusher();
 
 protected:
   bool mIsHandlingUserInput;
   bool mIsMouseDown;
-  bool mResetFMMouseDownState;
+  bool mResetFMMouseButtonHandlingState;
+
+  nsCOMPtr<nsIDocument> mMouseButtonEventHandlingDocument;
 
 private:
   // Hide so that this class can only be stack-allocated
   static void* operator new(size_t /*size*/) CPP_THROW_NEW { return nullptr; }
   static void operator delete(void* /*memory*/) {}
 };
 
 } // namespace mozilla
--- a/dom/media/tests/mochitest/pc.js
+++ b/dom/media/tests/mochitest/pc.js
@@ -1367,17 +1367,17 @@ function PeerConnectionWrapper(label, co
       type = 'audio';
     }
     if (event.stream.getVideoTracks().length > 0) {
       type += 'video';
     }
     self.attachMedia(event.stream, type, 'remote');
 
     Object.keys(self.addStreamCallbacks).forEach(function(name) {
-      info(this + " calling addStreamCallback " + name);
+      info(self + " calling addStreamCallback " + name);
       self.addStreamCallbacks[name]();
     });
    };
 
   this.ondatachannel = unexpectedEventAndFinish(this, 'ondatachannel');
 
   /**
    * Callback for native peer connection 'ondatachannel' events. If no custom handler
@@ -1906,18 +1906,18 @@ PeerConnectionWrapper.prototype = {
    * @param {object} constraintsRemote
    *        The media constraints of the local and remote peer connection object
    */
   checkMediaTracks : function PCW_checkMediaTracks(constraintsRemote, onSuccess) {
     var self = this;
     var addStreamTimeout = null;
 
     function _checkMediaTracks(constraintsRemote, onSuccess) {
-      if (self.addStreamTimeout !== null) {
-        clearTimeout(self.addStreamTimeout);
+      if (addStreamTimeout !== null) {
+        clearTimeout(addStreamTimeout);
       }
 
       var localConstraintAudioTracks =
         self.countAudioTracksInMediaConstraint(self.constraints);
       var localStreams = self._pc.getLocalStreams();
       var localAudioTracks = self.countAudioTracksInStreams(localStreams, false);
       is(localAudioTracks, localConstraintAudioTracks, self + ' has ' +
         localAudioTracks + ' local audio tracks');
@@ -1956,18 +1956,20 @@ PeerConnectionWrapper.prototype = {
     } else {
       info(self + " checkMediaTracks() got called before onAddStream fired");
       // we rely on the outer mochitest timeout to catch the case where
       // onaddstream never fires
       self.addStreamCallbacks.checkMediaTracks = function() {
         _checkMediaTracks(constraintsRemote, onSuccess);
       };
       addStreamTimeout = setTimeout(function () {
-        ok(false, self + " checkMediaTracks() timed out waiting for onaddstream event to fire");
-        onSuccess();
+        ok(self.onAddStreamFired, self + " checkMediaTracks() timed out waiting for onaddstream event to fire");
+        if (!self.onAddStreamFired) {
+          onSuccess();
+        }
       }, 60000);
     }
   },
 
   /**
    * Check that media flow is present on all media elements involved in this
    * test by waiting for confirmation that media flow is present.
    *
--- a/dom/tests/mochitest/pointerlock/file_retargetMouseEvents.html
+++ b/dom/tests/mochitest/pointerlock/file_retargetMouseEvents.html
@@ -91,78 +91,94 @@ https://bugzilla.mozilla.org/show_bug.cg
         childStats.wheel = true;
       };
 
       //  Event listeners for the parent element
       var startMouseTests = function() {
         parent.removeEventListener("mousemove", startMouseTests);
         parent.addEventListener("DOMMouseScroll", parentScrollTest);
         child.addEventListener("DOMMouseScroll", childScrollTest);
-        synthesizeWheel(child, 5, 5, {'deltaY': 10, 'lineOrPageDeltaY': 10,
-                                      'deltaMode': WheelEvent.DOM_DELTA_LINE});
+        SimpleTest.executeSoon(function () {
+          synthesizeWheel(child, 5, 5, {'deltaY': 10, 'lineOrPageDeltaY': 10,
+                                        'deltaMode': WheelEvent.DOM_DELTA_LINE});
+        });
       };
 
       var parentScrollTest = function (e) {
         parentStats.mouseScroll = true;
         parent.removeEventListener("DOMMouseScroll", parentScrollTest);
         child.removeEventListener("DOMMouseScroll", childScrollTest);
         parent.addEventListener("wheel", parentWheelTest);
         child.addEventListener("wheel", childWheelTest);
-        synthesizeWheel(child, 5, 5, {'deltaY': 10, 'lineOrPageDeltaY': 10,
-                                      'deltaMode': WheelEvent.DOM_DELTA_LINE});
+        SimpleTest.executeSoon(function () {
+          synthesizeWheel(child, 5, 5, {'deltaY': 10, 'lineOrPageDeltaY': 10,
+                                        'deltaMode': WheelEvent.DOM_DELTA_LINE});
+        });
       };
 
       var parentWheelTest = function (e) {
         parentStats.wheel = true;
         parent.removeEventListener("wheel", parentWheelTest);
         child.removeEventListener("wheel", childWheelTest);
         parent.addEventListener("mousedown", parentDownTest);
         child.addEventListener("mousedown", childDownTest);
-        synthesizeMouseAtCenter(child, {type: "mousedown"}, window);
+        SimpleTest.executeSoon(function () {
+          synthesizeMouseAtCenter(child, {type: "mousedown"}, window);
+        });
       };
 
       var parentDownTest = function (e) {
         parentStats.mouseDown = true;
         parent.removeEventListener("mousedown", parentDownTest);
         child.removeEventListener("mousedown", childDownTest);
         parent.addEventListener("mouseup", parentUpTest);
         child.addEventListener("mouseup", childUpTest);
-        synthesizeMouseAtCenter(child, {type: "mouseup"}, window);
+        SimpleTest.executeSoon(function () {
+          synthesizeMouseAtCenter(child, {type: "mouseup"}, window);
+        });
       };
 
       var parentUpTest = function (e) {
         parentStats.mouseUp = true;
         parent.removeEventListener("mouseup", parentUpTest);
         child.removeEventListener("mouseup", childUpTest);
         parent.addEventListener("click", parentClickTest);
         child.addEventListener("click", childClickTest);
-        synthesizeMouseAtCenter(child, {type: "click"}, window);
+        SimpleTest.executeSoon(function () {
+          synthesizeMouseAtCenter(child, {type: "click"}, window);
+        });
       };
 
       var parentClickTest = function (e) {
         parentStats.mouseClick = true;
         parent.removeEventListener("click", parentClickTest);
         child.removeEventListener("click", childClickTest);
         parent.addEventListener("mousemove", parentMoveTest);
         child.addEventListener("mousemove", childMoveTest);
-        synthesizeMouseAtCenter(child, {type: "mousemove"}, window);
+        SimpleTest.executeSoon(function () {
+          synthesizeMouseAtCenter(child, {type: "mousemove"}, window);
+        });
       };
 
       var parentMoveTest = function (e) {
         parentStats.mouseMove = true;
         parent.removeEventListener("mousemove", parentMoveTest);
         child.removeEventListener("mousemove", childMoveTest);
-        document.mozCancelFullScreen();
+        SimpleTest.executeSoon(function () {
+          document.mozCancelFullScreen();
+        });
       }
 
       document.addEventListener("mozpointerlockchange", function (e) {
         if (document.mozPointerLockElement === parent) {
           parent.addEventListener("mousemove", startMouseTests);
           child.addEventListener("mousemove", childMoveTest);
-          synthesizeMouseAtCenter(parent, {type: "mousemove"}, window);
+          SimpleTest.executeSoon(function () {
+            synthesizeMouseAtCenter(parent, {type: "mousemove"}, window);
+          });
         }
       }, false);
 
       document.addEventListener("mozfullscreenchange", function (e)  {
         if (document.mozFullScreenElement === parent) {
           parent.mozRequestPointerLock();
         } else {
           runTests();
--- a/dom/webidl/HTMLSelectElement.webidl
+++ b/dom/webidl/HTMLSelectElement.webidl
@@ -5,16 +5,18 @@
  *
  * The origin of this IDL file is
  * http://www.whatwg.org/html/#the-select-element
  */
 
 interface HTMLSelectElement : HTMLElement {
   [SetterThrows, Pure]
            attribute boolean autofocus;
+  [Pref="dom.forms.autocomplete.experimental", SetterThrows, Pure]
+           attribute DOMString autocomplete;
   [SetterThrows, Pure]
            attribute boolean disabled;
   [Pure]
   readonly attribute HTMLFormElement? form;
   [SetterThrows, Pure]
            attribute boolean multiple;
   [SetterThrows, Pure]
            attribute DOMString name;
--- a/gfx/layers/Layers.h
+++ b/gfx/layers/Layers.h
@@ -73,16 +73,17 @@ namespace dom {
 class OverfillCallback;
 }
 
 namespace layers {
 
 class Animation;
 class AnimationData;
 class AsyncPanZoomController;
+class ClientLayerManager;
 class CommonLayerAttributes;
 class Layer;
 class ThebesLayer;
 class ContainerLayer;
 class ImageLayer;
 class ColorLayer;
 class ImageContainer;
 class CanvasLayer;
@@ -198,16 +199,19 @@ public:
   bool IsDestroyed() { return mDestroyed; }
 
   virtual ShadowLayerForwarder* AsShadowForwarder()
   { return nullptr; }
 
   virtual LayerManagerComposite* AsLayerManagerComposite()
   { return nullptr; }
 
+  virtual ClientLayerManager* AsClientLayerManager()
+  { return nullptr; }
+
   /**
    * Returns true if this LayerManager is owned by an nsIWidget,
    * and is used for drawing into the widget.
    */
   virtual bool IsWidgetLayerManager() { return true; }
 
   /**
    * Start a new transaction. Nested transactions are not allowed so
--- a/gfx/layers/client/ClientLayerManager.cpp
+++ b/gfx/layers/client/ClientLayerManager.cpp
@@ -279,16 +279,25 @@ ClientLayerManager::GetRemoteRenderer()
 {
   if (!mWidget) {
     return nullptr;
   }
 
   return mWidget->GetRemoteRenderer();
 }
 
+CompositorChild*
+ClientLayerManager::GetCompositorChild()
+{
+  if (XRE_GetProcessType() != GeckoProcessType_Default) {
+    return CompositorChild::Get();
+  }
+  return GetRemoteRenderer();
+}
+
 void
 ClientLayerManager::Composite()
 {
   mForwarder->Composite();
 }
 
 void
 ClientLayerManager::DidComposite(uint64_t aTransactionId)
--- a/gfx/layers/client/ClientLayerManager.h
+++ b/gfx/layers/client/ClientLayerManager.h
@@ -46,16 +46,21 @@ public:
   ClientLayerManager(nsIWidget* aWidget);
   virtual ~ClientLayerManager();
 
   virtual ShadowLayerForwarder* AsShadowForwarder()
   {
     return mForwarder;
   }
 
+  virtual ClientLayerManager* AsClientLayerManager()
+  {
+    return this;
+  }
+
   virtual int32_t GetMaxTextureSize() const;
 
   virtual void SetDefaultTargetConfiguration(BufferMode aDoubleBuffering, ScreenRotation aRotation);
   virtual void BeginTransactionWithTarget(gfxContext* aTarget);
   virtual void BeginTransaction();
   virtual bool EndEmptyTransaction(EndTransactionFlags aFlags = END_DEFAULT);
   virtual void EndTransaction(DrawThebesLayerCallback aCallback,
                               void* aCallbackData,
@@ -102,43 +107,45 @@ public:
 
   bool HasShadowManager() const { return mForwarder->HasShadowManager(); }
 
   virtual bool IsCompositingCheap();
   virtual bool HasShadowManagerInternal() const { return HasShadowManager(); }
 
   virtual void SetIsFirstPaint() MOZ_OVERRIDE;
 
-  TextureClientPool *GetTexturePool(gfx::SurfaceFormat aFormat);
-  SimpleTextureClientPool *GetSimpleTileTexturePool(gfx::SurfaceFormat aFormat);
+  TextureClientPool* GetTexturePool(gfx::SurfaceFormat aFormat);
+  SimpleTextureClientPool* GetSimpleTileTexturePool(gfx::SurfaceFormat aFormat);
 
   // Drop cached resources and ask our shadow manager to do the same,
   // if we have one.
   virtual void ClearCachedResources(Layer* aSubtree = nullptr) MOZ_OVERRIDE;
 
   void SetRepeatTransaction() { mRepeatTransaction = true; }
   bool GetRepeatTransaction() { return mRepeatTransaction; }
 
   bool IsRepeatTransaction() { return mIsRepeatTransaction; }
 
   void SetTransactionIncomplete() { mTransactionIncomplete = true; }
 
   bool HasShadowTarget() { return !!mShadowTarget; }
 
-  void SetShadowTarget(gfxContext *aTarget) { mShadowTarget = aTarget; }
+  void SetShadowTarget(gfxContext* aTarget) { mShadowTarget = aTarget; }
 
   bool CompositorMightResample() { return mCompositorMightResample; } 
   
   DrawThebesLayerCallback GetThebesLayerCallback() const
   { return mThebesLayerCallback; }
 
   void* GetThebesLayerCallbackData() const
   { return mThebesLayerCallbackData; }
 
-  CompositorChild *GetRemoteRenderer();
+  CompositorChild* GetRemoteRenderer();
+
+  CompositorChild* GetCompositorChild();
 
   /**
    * Called for each iteration of a progressive tile update. Fills
    * aCompositionBounds and aZoom with the current scale and composition bounds
    * being used to composite the layers in this manager, to determine what area
    * intersects with the target composition bounds.
    * aDrawingCritical will be true if the current drawing operation is using
    * the critical displayport.
--- a/gfx/layers/client/ContentClient.cpp
+++ b/gfx/layers/client/ContentClient.cpp
@@ -193,25 +193,30 @@ ContentClientRemoteBuffer::CreateAndAllo
   aClient = CreateTextureClientForDrawing(mSurfaceFormat,
                                           mTextureInfo.mTextureFlags | aFlags,
                                           gfx::BackendType::NONE,
                                           mSize);
   if (!aClient) {
     return false;
   }
 
-  if (!aClient->AllocateForSurface(mSize, ALLOC_CLEAR_BUFFER)) {
+  TextureAllocationFlags flags = TextureAllocationFlags::ALLOC_CLEAR_BUFFER;
+  if (aFlags & TextureFlags::ON_WHITE) {
+    flags = TextureAllocationFlags::ALLOC_CLEAR_BUFFER_WHITE;
+  }
+
+  if (!aClient->AllocateForSurface(mSize, flags)) {
     aClient = CreateTextureClientForDrawing(mSurfaceFormat,
                 mTextureInfo.mTextureFlags | TextureFlags::ALLOC_FALLBACK | aFlags,
                 gfx::BackendType::NONE,
                 mSize);
     if (!aClient) {
       return false;
     }
-    if (!aClient->AllocateForSurface(mSize, ALLOC_CLEAR_BUFFER)) {
+    if (!aClient->AllocateForSurface(mSize, flags)) {
       NS_WARNING("Could not allocate texture client");
       aClient = nullptr;
       return false;
     }
   }
 
   NS_WARN_IF_FALSE(aClient->IsValid(), "Created an invalid texture client");
   return true;
--- a/gfx/layers/client/TextureClient.cpp
+++ b/gfx/layers/client/TextureClient.cpp
@@ -562,16 +562,19 @@ BufferTextureClient::AllocateForSurface(
     = ImageDataSerializer::ComputeMinBufferSize(aSize, mFormat);
   if (!Allocate(bufSize)) {
     return false;
   }
 
   if (aFlags & ALLOC_CLEAR_BUFFER) {
     memset(GetBuffer(), 0, bufSize);
   }
+  if (aFlags & ALLOC_CLEAR_BUFFER_WHITE) {
+    memset(GetBuffer(), 0xFF, bufSize);
+  }
 
   ImageDataSerializer serializer(GetBuffer(), GetBufferSize());
   serializer.InitializeBufferInfo(aSize, mFormat);
   mSize = aSize;
   return true;
 }
 
 gfx::DrawTarget*
--- a/gfx/layers/client/TextureClient.h
+++ b/gfx/layers/client/TextureClient.h
@@ -48,17 +48,18 @@ class TextureClient;
 
 /**
  * TextureClient is the abstraction that allows us to share data between the
  * content and the compositor side.
  */
 
 enum TextureAllocationFlags {
   ALLOC_DEFAULT = 0,
-  ALLOC_CLEAR_BUFFER = 1
+  ALLOC_CLEAR_BUFFER = 1,
+  ALLOC_CLEAR_BUFFER_WHITE = 2
 };
 
 /**
  * Interface for TextureClients that can be updated using YCbCr data.
  */
 class TextureClientYCbCr
 {
 public:
--- a/gfx/layers/client/TiledContentClient.cpp
+++ b/gfx/layers/client/TiledContentClient.cpp
@@ -147,17 +147,21 @@ SharedFrameMetricsHelper::UpdateFromComp
     ContainerLayer* aLayer,
     bool aHasPendingNewThebesContent,
     bool aLowPrecision,
     ParentLayerRect& aCompositionBounds,
     CSSToParentLayerScale& aZoom)
 {
   MOZ_ASSERT(aLayer);
 
-  CompositorChild* compositor = CompositorChild::Get();
+  CompositorChild* compositor = nullptr;
+  if(aLayer->Manager() &&
+     aLayer->Manager()->AsClientLayerManager()) {
+    compositor = aLayer->Manager()->AsClientLayerManager()->GetCompositorChild();
+  }
 
   if (!compositor) {
     FindFallbackContentFrameMetrics(aLayer, aCompositionBounds, aZoom);
     return false;
   }
 
   const FrameMetrics& contentMetrics = aLayer->GetFrameMetrics();
   FrameMetrics compositorMetrics;
--- a/gfx/layers/d3d11/TextureD3D11.cpp
+++ b/gfx/layers/d3d11/TextureD3D11.cpp
@@ -145,16 +145,17 @@ CreateTextureHostD3D11(const SurfaceDesc
   return result;
 }
 
 TextureClientD3D11::TextureClientD3D11(gfx::SurfaceFormat aFormat, TextureFlags aFlags)
   : TextureClient(aFlags)
   , mFormat(aFormat)
   , mIsLocked(false)
   , mNeedsClear(false)
+  , mNeedsClearWhite(false)
 {}
 
 TextureClientD3D11::~TextureClientD3D11()
 {
 #ifdef DEBUG
   // An Azure DrawTarget needs to be locked when it gets nullptr'ed as this is
   // when it calls EndDraw. This EndDraw should not execute anything so it
   // shouldn't -really- need the lock but the debug layer chokes on this.
@@ -179,16 +180,21 @@ TextureClientD3D11::Lock(OpenMode aMode)
   LockD3DTexture(mTexture.get());
   mIsLocked = true;
 
   if (mNeedsClear) {
     mDrawTarget = BorrowDrawTarget();
     mDrawTarget->ClearRect(Rect(0, 0, GetSize().width, GetSize().height));
     mNeedsClear = false;
   }
+  if (mNeedsClearWhite) {
+    mDrawTarget = BorrowDrawTarget();
+    mDrawTarget->FillRect(Rect(0, 0, GetSize().width, GetSize().height), ColorPattern(Color(1.0, 1.0, 1.0, 1.0)));
+    mNeedsClearWhite = false;
+  }
 
   return true;
 }
 
 void
 TextureClientD3D11::Unlock()
 {
   MOZ_ASSERT(mIsLocked, "Unlocked called while the texture is not locked!");
@@ -241,16 +247,17 @@ TextureClientD3D11::AllocateForSurface(g
 
   if (FAILED(hr)) {
     LOGD3D11("Error creating texture for client!");
     return false;
   }
 
   // Defer clearing to the next time we lock to avoid an extra (expensive) lock.
   mNeedsClear = aFlags & ALLOC_CLEAR_BUFFER;
+  mNeedsClearWhite = aFlags & ALLOC_CLEAR_BUFFER_WHITE;
 
   return true;
 }
 
 bool
 TextureClientD3D11::ToSurfaceDescriptor(SurfaceDescriptor& aOutDescriptor)
 {
   if (!IsAllocated()) {
--- a/gfx/layers/d3d11/TextureD3D11.h
+++ b/gfx/layers/d3d11/TextureD3D11.h
@@ -61,16 +61,17 @@ public:
 
 protected:
   gfx::IntSize mSize;
   RefPtr<ID3D10Texture2D> mTexture;
   RefPtr<gfx::DrawTarget> mDrawTarget;
   gfx::SurfaceFormat mFormat;
   bool mIsLocked;
   bool mNeedsClear;
+  bool mNeedsClearWhite;
 };
 
 /**
  * TextureSource that provides with the necessary APIs to be composited by a
  * CompositorD3D11.
  */
 class TextureSourceD3D11
 {
--- a/gfx/layers/d3d9/TextureD3D9.cpp
+++ b/gfx/layers/d3d9/TextureD3D9.cpp
@@ -546,16 +546,17 @@ DataTextureSourceD3D9::GetTileRect()
   return ThebesIntRect(GetTileRect(mCurrentTile));
 }
 
 CairoTextureClientD3D9::CairoTextureClientD3D9(gfx::SurfaceFormat aFormat, TextureFlags aFlags)
   : TextureClient(aFlags)
   , mFormat(aFormat)
   , mIsLocked(false)
   , mNeedsClear(false)
+  , mNeedsClearWhite(false)
   , mLockRect(false)
 {
   MOZ_COUNT_CTOR(CairoTextureClientD3D9);
 }
 
 CairoTextureClientD3D9::~CairoTextureClientD3D9()
 {
   MOZ_COUNT_DTOR(CairoTextureClientD3D9);
@@ -586,16 +587,21 @@ CairoTextureClientD3D9::Lock(OpenMode)
 
   mIsLocked = true;
 
   if (mNeedsClear) {
     mDrawTarget = BorrowDrawTarget();
     mDrawTarget->ClearRect(Rect(0, 0, GetSize().width, GetSize().height));
     mNeedsClear = false;
   }
+  if (mNeedsClearWhite) {
+    mDrawTarget = BorrowDrawTarget();
+    mDrawTarget->FillRect(Rect(0, 0, GetSize().width, GetSize().height), ColorPattern(Color(1.0, 1.0, 1.0, 1.0)));
+    mNeedsClearWhite = false;
+  }
 
   return true;
 }
 
 void
 CairoTextureClientD3D9::Unlock()
 {
   MOZ_ASSERT(mIsLocked, "Unlocked called while the texture is not locked!");
@@ -670,16 +676,17 @@ CairoTextureClientD3D9::AllocateForSurfa
   DeviceManagerD3D9* deviceManager = gfxWindowsPlatform::GetPlatform()->GetD3D9DeviceManager();
   if (!deviceManager ||
       !(mTexture = deviceManager->CreateTexture(mSize, format, D3DPOOL_SYSTEMMEM, nullptr))) {
     NS_WARNING("Could not create d3d9 texture");
     return false;
   }
 
   mNeedsClear = aFlags & ALLOC_CLEAR_BUFFER;
+  mNeedsClearWhite = aFlags & ALLOC_CLEAR_BUFFER_WHITE;
 
   MOZ_ASSERT(mTexture);
   return true;
 }
 
 DIBTextureClientD3D9::DIBTextureClientD3D9(gfx::SurfaceFormat aFormat, TextureFlags aFlags)
   : TextureClient(aFlags)
   , mFormat(aFormat)
--- a/gfx/layers/d3d9/TextureD3D9.h
+++ b/gfx/layers/d3d9/TextureD3D9.h
@@ -220,16 +220,17 @@ private:
   RefPtr<IDirect3DTexture9> mTexture;
   nsRefPtr<IDirect3DSurface9> mD3D9Surface;
   RefPtr<gfx::DrawTarget> mDrawTarget;
   nsRefPtr<gfxASurface> mSurface;
   gfx::IntSize mSize;
   gfx::SurfaceFormat mFormat;
   bool mIsLocked;
   bool mNeedsClear;
+  bool mNeedsClearWhite;
   bool mLockRect;
 };
 
 /**
  * Can only be drawn into through Cairo.
  * Prefer CairoTextureClientD3D9 when possible.
  * The coresponding TextureHost is DIBTextureHostD3D9.
  */
--- a/js/public/GCAPI.h
+++ b/js/public/GCAPI.h
@@ -38,17 +38,17 @@ namespace JS {
     D(API)                                      \
     D(MAYBEGC)                                  \
     D(DESTROY_RUNTIME)                          \
     D(DESTROY_CONTEXT)                          \
     D(LAST_DITCH)                               \
     D(TOO_MUCH_MALLOC)                          \
     D(ALLOC_TRIGGER)                            \
     D(DEBUG_GC)                                 \
-    D(COMPARTMENT_REVIVED)                      \
+    D(TRANSPLANT)                               \
     D(RESET)                                    \
     D(OUT_OF_NURSERY)                           \
     D(EVICT_NURSERY)                            \
     D(FULL_STORE_BUFFER)                        \
                                                 \
     /* These are reserved for future use. */    \
     D(RESERVED0)                                \
     D(RESERVED1)                                \
--- a/js/src/aclocal.m4
+++ b/js/src/aclocal.m4
@@ -7,16 +7,17 @@ builtin(include, ../../build/autoconf/ho
 builtin(include, ../../build/autoconf/acwinpaths.m4)dnl
 builtin(include, ../../build/autoconf/hooks.m4)dnl
 builtin(include, ../../build/autoconf/config.status.m4)dnl
 builtin(include, ../../build/autoconf/toolchain.m4)dnl
 builtin(include, ../../build/autoconf/ccache.m4)dnl
 builtin(include, ../../build/autoconf/wrapper.m4)dnl
 builtin(include, ../../build/autoconf/pkg.m4)dnl
 builtin(include, ../../build/autoconf/nspr.m4)dnl
+builtin(include, ../../build/autoconf/nspr-build.m4)dnl
 builtin(include, ../../build/autoconf/codeset.m4)dnl
 builtin(include, ../../build/autoconf/altoptions.m4)dnl
 builtin(include, ../../build/autoconf/mozprog.m4)dnl
 builtin(include, ../../build/autoconf/mozheader.m4)dnl
 builtin(include, ../../build/autoconf/mozcommonheader.m4)dnl
 builtin(include, ../../build/autoconf/lto.m4)dnl
 builtin(include, ../../build/autoconf/gcc-pr49911.m4)dnl
 builtin(include, ../../build/autoconf/gcc-pr39608.m4)dnl
--- a/js/src/configure.in
+++ b/js/src/configure.in
@@ -107,17 +107,17 @@ if test "$_conflict_files"; then
 	*   To clean up the source tree:
 	*     1. cd $_topsrcdir
 	*     2. gmake distclean
 	***
 	EOF
   exit 1
   break
 fi
-MOZ_BUILD_ROOT=`pwd`
+MOZ_BUILD_ROOT=`pwd -W 2>/dev/null || pwd`
 
 dnl Choose where to put the 'dist' directory.
 dnl ==============================================================
 
 MOZ_ARG_WITH_STRING(dist-dir,
 [  --with-dist-dir=DIR     Use DIR as 'dist' staging area.  DIR may be
                           relative to the top of SpiderMonkey build tree,
                           or absolute.],
@@ -166,16 +166,17 @@ else
 fi
 AC_SUBST(JS_SHARED_LIBRARY)
 
 if test "$JS_STANDALONE" = no; then
   autoconfmk=autoconf-js.mk
   JS_STANDALONE=
 else
   JS_STANDALONE=1
+  LIBXUL_DIST="$MOZ_BUILD_ROOT/dist"
   AC_DEFINE(JS_STANDALONE)
 fi
 AC_SUBST(JS_STANDALONE)
 BUILDING_JS=1
 AC_SUBST(autoconfmk)
 
 MOZ_ARG_WITH_STRING(gonk,
 [  --with-gonk=DIR
@@ -1718,22 +1719,16 @@ ia64*-hpux*)
     TARGET_NSPR_MDCPUCFG='\"md/_win95.cfg\"'
 
     dnl set NO_X11 defines here as the general check is skipped on win32
     no_x=yes
     AC_DEFINE(NO_X11)
 
     case "$host" in
     *-mingw*)
-        MOZ_BUILD_ROOT=`cd $MOZ_BUILD_ROOT && pwd -W`
-        ;;
-    esac
-
-    case "$host" in
-    *-mingw*)
         if test -z "$MOZ_TOOLS"; then
             AC_MSG_ERROR([MOZ_TOOLS is not set])
         fi
         MOZ_TOOLS_DIR=`cd $MOZ_TOOLS && pwd -W`
         if test "$?" != "0" -o -z "$MOZ_TOOLS_DIR"; then
             AC_MSG_ERROR([cd \$MOZ_TOOLS failed. MOZ_TOOLS ==? $MOZ_TOOLS])
         fi
         MOZ_TOOLS_BIN_DIR="$(cd "$MOZ_TOOLS_DIR/bin" && pwd)"
@@ -2753,113 +2748,28 @@ AC_SUBST(MOZ_DISABLE_UNIFIED_COMPILATION
 
 dnl ========================================================
 dnl =
 dnl = Check for external package dependencies
 dnl =
 dnl ========================================================
 MOZ_ARG_HEADER(External Packages)
 
-dnl ========================================================
-dnl = Find the right NSPR to use.
-dnl ========================================================
-MOZ_ARG_WITH_BOOL(system-nspr,
-[  --with-system-nspr      Use an NSPR that is already built and installed.
-                          Use the 'nspr-config' script in the current path,
-                          or look for the script in the directories given with
-                          --with-nspr-exec-prefix or --with-nspr-prefix.
-                          (Those flags are only checked if you specify
-                          --with-system-nspr.)],
-    _USE_SYSTEM_NSPR=1 )
-
-MOZ_ARG_WITH_STRING(nspr-cflags,
-[  --with-nspr-cflags=FLAGS
-                          Pass FLAGS to CC when building code that uses NSPR.
-                          Use this when there's no accurate nspr-config
-                          script available.  This is the case when building
-                          SpiderMonkey as part of the Mozilla tree: the
-                          top-level configure script computes NSPR flags
-                          that accomodate the quirks of that environment.],
-    NSPR_CFLAGS=$withval)
-MOZ_ARG_WITH_STRING(nspr-libs,
-[  --with-nspr-libs=LIBS   Pass LIBS to LD when linking code that uses NSPR.
-                          See --with-nspr-cflags for more details.],
-    NSPR_LIBS=$withval)
-AC_SUBST(NSPR_CFLAGS)
-AC_SUBST(NSPR_LIBS)
-
 JS_THREADSAFE=1
 MOZ_ARG_DISABLE_BOOL(threadsafe,
 [  --disable-threadsafe    Disable support for multiple threads.],
     JS_THREADSAFE= ,
     JS_THREADSAFE=1 )
 if test -n "$JS_THREADSAFE"; then
     AC_DEFINE(JS_THREADSAFE)
 fi
 JS_THREADSAFE_CONFIGURED=$JS_THREADSAFE
 AC_SUBST(JS_THREADSAFE_CONFIGURED)
 
-if test "$_USE_SYSTEM_NSPR" || (test "$NSPR_CFLAGS" -o "$NSPR_LIBS"); then
-  _HAS_NSPR=1
-fi
-
-case "$target" in
-  *linux*|*darwin*|*dragonfly*|*freebsd*|*netbsd*|*openbsd*)
-    if test -z "$_HAS_NSPR" && test "$JS_THREADSAFE"; then
-      JS_POSIX_NSPR_DEFAULT=1
-    fi
-    ;;
-esac
-
-MOZ_ARG_ENABLE_BOOL(posix-nspr-emulation,
-[  --enable-posix-nspr-emulation
-                          Enable emulation of NSPR for POSIX systems],
-    JS_POSIX_NSPR=1,
-    JS_POSIX_NSPR=,
-    JS_POSIX_NSPR="$JS_POSIX_NSPR_DEFAULT" )
-if test -n "$JS_POSIX_NSPR"; then
-    AC_DEFINE(JS_POSIX_NSPR)
-fi
-
-AC_SUBST(JS_POSIX_NSPR)
-
-dnl Pass either --with-system-nspr or (--with-nspr-cflags and
-dnl --with-nspr-libs), but not both.
-if test "$_USE_SYSTEM_NSPR" && (test "$NSPR_CFLAGS" -o "$NSPR_LIBS"); then
-    AC_MSG_ERROR([--with-system-nspr and --with-nspr-libs/cflags are mutually exclusive.
-See 'configure --help'.])
-fi
-
-dnl Can't use --enable-posix-nspr-emulation if compiling with NSPR.
-if test "$_HAS_NSPR" && test "$JS_POSIX_NSPR"; then
-    AC_MSG_ERROR([--enable-posix-nspr-emulation is mututally exclusive with --with-system-nspr
-and --with-nspr-libs/cflags. See 'configure --help'.])
-fi
-
-if test -n "$_USE_SYSTEM_NSPR"; then
-    MOZ_NATIVE_NSPR=
-    AM_PATH_NSPR($NSPR_MINVER, [MOZ_NATIVE_NSPR=1], [AC_MSG_ERROR([your don't have NSPR installed or your version is too old])])
-fi
-
-NSPR_PKGCONF_CHECK="nspr"
-if test -n "$MOZ_NATIVE_NSPR"; then
-    # piggy back on $MOZ_NATIVE_NSPR to set a variable for the nspr check for js.pc
-    NSPR_PKGCONF_CHECK="nspr >= $NSPR_MINVER"
-
-    _SAVE_CFLAGS=$CFLAGS
-    CFLAGS="$CFLAGS $NSPR_CFLAGS"
-    AC_TRY_COMPILE([#include "prlog.h"],
-                [#ifndef PR_STATIC_ASSERT
-                 #error PR_STATIC_ASSERT not defined
-                 #endif],
-                [MOZ_NATIVE_NSPR=1],
-                AC_MSG_ERROR([system NSPR does not support PR_STATIC_ASSERT]))
-    CFLAGS=$_SAVE_CFLAGS
-fi
-AC_SUBST(NSPR_PKGCONF_CHECK)
+MOZ_CONFIG_NSPR(js)
 
 dnl ========================================================
 dnl system zlib Support
 dnl ========================================================
 dnl Standalone js defaults to system zlib
 ZLIB_DIR=yes
 
 MOZ_ZLIB_CHECK([1.2.3])
@@ -4036,17 +3946,16 @@ AC_HAVE_FUNCS(setlocale)
 AC_HAVE_FUNCS(localeconv)
 
 AC_SUBST(MOZILLA_VERSION)
 
 AC_SUBST(ac_configure_args)
 
 AC_SUBST(TOOLCHAIN_PREFIX)
 
-
 if test -n "$JS_STANDALONE"; then
 MOZ_APP_NAME="mozjs"
 MOZ_APP_VERSION="$MOZILLA_SYMBOLVERSION"
 JS_LIBRARY_NAME="mozjs-$MOZILLA_SYMBOLVERSION"
 else
 JS_LIBRARY_NAME="mozjs"
 fi
 JS_CONFIG_LIBS="$NSPR_LIBS $LIBS"
@@ -4054,16 +3963,19 @@ if test -n "$GNU_CC"; then
 JS_CONFIG_MOZ_JS_LIBS='-L${libdir} -l${JS_LIBRARY_NAME}'
 else
 JS_CONFIG_MOZ_JS_LIBS='${libdir}/${JS_LIBRARY_NAME}.lib'
 fi
 AC_SUBST(JS_LIBRARY_NAME)
 AC_SUBST(JS_CONFIG_MOZ_JS_LIBS)
 AC_SUBST(JS_CONFIG_LIBS)
 
+if test -n "$MOZ_BUILD_NSPR"; then
+    MOZ_SUBCONFIGURE_NSPR()
+fi
 MOZ_SUBCONFIGURE_FFI()
 
 dnl Spit out some output
 dnl ========================================================
 MOZ_CREATE_CONFIG_STATUS()
 
 if test "$JS_STANDALONE"; then
   MOZ_RUN_CONFIG_STATUS()
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -193,16 +193,19 @@ CheckMarkedThing(JSTracer *trc, T **thin
         return;
 
     JS_ASSERT(thing->zone());
     JS_ASSERT(thing->zone()->runtimeFromMainThread() == trc->runtime());
     JS_ASSERT(trc->hasTracingDetails());
 
     DebugOnly<JSRuntime *> rt = trc->runtime();
 
+    JS_ASSERT_IF(IS_GC_MARKING_TRACER(trc) && rt->gc.isManipulatingDeadZones(),
+                 !thing->zone()->scheduledForDestruction);
+
     JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
     JS_ASSERT_IF(thing->zone()->requireGCTracer(),
                  IS_GC_MARKING_TRACER(trc));
 
     JS_ASSERT(thing->isAligned());
 
     JS_ASSERT(MapTypeToTraceKind<T>::kind == GetGCThingTraceKind(thing));
@@ -223,43 +226,16 @@ CheckMarkedThing(JSTracer *trc, T **thin
      * ArenaHeader may not be synced with the real one in ArenaLists.
      */
     JS_ASSERT_IF(IsThingPoisoned(thing) && rt->isHeapBusy(),
                  !InFreeList(thing->arenaHeader(), thing));
 #endif
 
 }
 
-/*
- * We only set the maybeAlive flag for objects and scripts. It's assumed that,
- * if a compartment is alive, then it will have at least some live object or
- * script it in. Even if we get this wrong, the worst that will happen is that
- * scheduledForDestruction will be set on the compartment, which will cause some
- * extra GC activity to try to free the compartment.
- */
-template<typename T>
-static inline void
-SetMaybeAliveFlag(T *thing)
-{
-}
-
-template<>
-void
-SetMaybeAliveFlag(JSObject *thing)
-{
-    thing->compartment()->maybeAlive = true;
-}
-
-template<>
-void
-SetMaybeAliveFlag(JSScript *thing)
-{
-    thing->compartment()->maybeAlive = true;
-}
-
 template<typename T>
 static void
 MarkInternal(JSTracer *trc, T **thingp)
 {
     CheckMarkedThing(trc, thingp);
     T *thing = *thingp;
 
     if (!trc->callback) {
@@ -293,17 +269,17 @@ MarkInternal(JSTracer *trc, T **thingp)
         /*
          * Don't mark things outside a compartment if we are in a
          * per-compartment GC.
          */
         if (!thing->zone()->isGCMarking())
             return;
 
         PushMarkStack(AsGCMarker(trc), thing);
-        SetMaybeAliveFlag(thing);
+        thing->zone()->maybeAlive = true;
     } else {
         trc->callback(trc, (void **)thingp, MapTypeToTraceKind<T>::kind);
         trc->unsetTracingLocation();
     }
 
     trc->clearTracingDetails();
 }
 
--- a/js/src/gc/Tracer.cpp
+++ b/js/src/gc/Tracer.cpp
@@ -633,30 +633,17 @@ GCMarker::appendGrayRoot(void *thing, JS
 #ifdef DEBUG
     root.debugPrinter = debugPrinter();
     root.debugPrintArg = debugPrintArg();
     root.debugPrintIndex = debugPrintIndex();
 #endif
 
     Zone *zone = static_cast<Cell *>(thing)->tenuredZone();
     if (zone->isCollecting()) {
-        // See the comment on SetMaybeAliveFlag to see why we only do this for
-        // objects and scripts. We rely on gray root buffering for this to work,
-        // but we only need to worry about uncollected dead compartments during
-        // incremental GCs (when we do gray root buffering).
-        switch (kind) {
-          case JSTRACE_OBJECT:
-            static_cast<JSObject *>(thing)->compartment()->maybeAlive = true;
-            break;
-          case JSTRACE_SCRIPT:
-            static_cast<JSScript *>(thing)->compartment()->maybeAlive = true;
-            break;
-          default:
-            break;
-        }
+        zone->maybeAlive = true;
         if (!zone->gcGrayRoots.append(root)) {
             resetBufferedGrayRoots();
             grayBufferState = GRAY_BUFFER_FAILED;
         }
     }
 }
 
 void
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -30,16 +30,18 @@ JS::Zone::Zone(JSRuntime *rt)
     gcHeapGrowthFactor(3.0),
     gcMallocBytes(0),
     gcMallocGCTriggered(false),
     gcBytes(0),
     gcTriggerBytes(0),
     data(nullptr),
     isSystem(false),
     usedByExclusiveThread(false),
+    scheduledForDestruction(false),
+    maybeAlive(true),
     active(false),
     jitZone_(nullptr),
     gcState_(NoGC),
     gcScheduled_(false),
     gcPreserveCode_(false),
     ionUsingBarriers_(false)
 {
     /* Ensure that there are no vtables to mess us up here. */
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -253,16 +253,21 @@ struct Zone : public JS::shadow::Zone,
 
     // Per-zone data for use by an embedder.
     void *data;
 
     bool isSystem;
 
     bool usedByExclusiveThread;
 
+    // These flags help us to discover if a compartment that shouldn't be alive
+    // manages to outlive a GC.
+    bool scheduledForDestruction;
+    bool maybeAlive;
+
     // True when there are active frames.
     bool active;
 
     mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;
 
   private:
     js::jit::JitZone *jitZone_;
 
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/parallel/bug1024567.js
@@ -0,0 +1,10 @@
+// Stale assertion in initDenseElementsUnbarriered, but it's a useful test anyhow.
+
+if (!getBuildConfiguration().parallelJS)
+  quit(0);
+
+var sum=0;
+for ( var i=0 ; i < 1000 ; i++ ) {
+    sum += ([0].mapPar(function (...xs) { return xs.length; }))[0];
+}
+assertEq(sum, 3000);		// Function is invoked with value, index, self
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -1085,16 +1085,17 @@ JS_WrapId(JSContext *cx, JS::MutableHand
 JS_PUBLIC_API(JSObject *)
 JS_TransplantObject(JSContext *cx, HandleObject origobj, HandleObject target)
 {
     AssertHeapIsIdle(cx);
     JS_ASSERT(origobj != target);
     JS_ASSERT(!origobj->is<CrossCompartmentWrapperObject>());
     JS_ASSERT(!target->is<CrossCompartmentWrapperObject>());
 
+    AutoMaybeTouchDeadZones agc(cx);
     AutoDisableProxyCheck adpc(cx->runtime());
 
     JSCompartment *destination = target->compartment();
     RootedValue origv(cx, ObjectValue(*origobj));
     RootedObject newIdentity(cx);
 
     if (origobj->compartment() == destination) {
         // If the original object is in the same compartment as the
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -60,19 +60,17 @@ JSCompartment::JSCompartment(Zone *zone,
     gcWeakMapList(nullptr),
     debugModeBits(runtime_->debugMode ? DebugFromC : 0),
     rngState(0),
     watchpointMap(nullptr),
     scriptCountsMap(nullptr),
     debugScriptMap(nullptr),
     debugScopes(nullptr),
     enumerators(nullptr),
-    compartmentStats(nullptr),
-    scheduledForDestruction(false),
-    maybeAlive(true)
+    compartmentStats(nullptr)
 #ifdef JS_ION
     , jitCompartment_(nullptr)
 #endif
 {
     runtime_->numCompartments++;
     JS_ASSERT_IF(options.mergeable(), options.invisibleToDebugger());
 }
 
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -443,21 +443,16 @@ struct JSCompartment
      * List of potentially active iterators that may need deleted property
      * suppression.
      */
     js::NativeIterator *enumerators;
 
     /* Used by memory reporters and invalid otherwise. */
     void               *compartmentStats;
 
-    // These flags help us to discover if a compartment that shouldn't be alive
-    // manages to outlive a GC.
-    bool scheduledForDestruction;
-    bool maybeAlive;
-
 #ifdef JS_ION
   private:
     js::jit::JitCompartment *jitCompartment_;
 
   public:
     bool ensureJitCompartmentExists(JSContext *cx);
     js::jit::JitCompartment *jitCompartment() {
         return jitCompartment_;
--- a/js/src/jsfriendapi.cpp
+++ b/js/src/jsfriendapi.cpp
@@ -952,33 +952,35 @@ JS::IsIncrementalBarrierNeeded(JSContext
 JS_FRIEND_API(void)
 JS::IncrementalObjectBarrier(JSObject *obj)
 {
     if (!obj)
         return;
 
     JS_ASSERT(!obj->zone()->runtimeFromMainThread()->isHeapMajorCollecting());
 
+    AutoMarkInDeadZone amn(obj->zone());
+
     JSObject::writeBarrierPre(obj);
 }
 
 JS_FRIEND_API(void)
 JS::IncrementalReferenceBarrier(void *ptr, JSGCTraceKind kind)
 {
     if (!ptr)
         return;
 
     gc::Cell *cell = static_cast<gc::Cell *>(ptr);
-
-#ifdef DEBUG
     Zone *zone = kind == JSTRACE_OBJECT
                  ? static_cast<JSObject *>(cell)->zone()
                  : cell->tenuredZone();
+
     JS_ASSERT(!zone->runtimeFromMainThread()->isHeapMajorCollecting());
-#endif
+
+    AutoMarkInDeadZone amn(zone);
 
     if (kind == JSTRACE_OBJECT)
         JSObject::writeBarrierPre(static_cast<JSObject*>(cell));
     else if (kind == JSTRACE_STRING)
         JSString::writeBarrierPre(static_cast<JSString*>(cell));
     else if (kind == JSTRACE_SCRIPT)
         JSScript::writeBarrierPre(static_cast<JSScript*>(cell));
     else if (kind == JSTRACE_LAZY_SCRIPT)
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -3095,24 +3095,24 @@ GCRuntime::beginMarkPhase()
             if (!rt->isAtomsZone(zone)) {
                 any = true;
                 zone->setGCState(Zone::Mark);
             }
         } else {
             isFull = false;
         }
 
+        zone->scheduledForDestruction = false;
+        zone->maybeAlive = false;
         zone->setPreservingCode(false);
     }
 
     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) {
         JS_ASSERT(c->gcLiveArrayBuffers.empty());
         c->marked = false;
-        c->scheduledForDestruction = false;
-        c->maybeAlive = false;
         if (shouldPreserveJITCode(c, currentTime))
             c->zone()->setPreservingCode(true);
     }
 
     if (!rt->gc.shouldCleanUpEverything) {
 #ifdef JS_ION
         if (JSCompartment *comp = jit::TopmostIonActivationCompartment(rt))
             comp->zone()->setPreservingCode(true);
@@ -3203,71 +3203,61 @@ GCRuntime::beginMarkPhase()
     if (isFull)
         UnmarkScriptData(rt);
 
     markRuntime(gcmarker);
     if (isIncremental)
         bufferGrayRoots();
 
     /*
-     * This code ensures that if a compartment is "dead", then it will be
-     * collected in this GC. A compartment is considered dead if its maybeAlive
+     * This code ensures that if a zone is "dead", then it will be
+     * collected in this GC. A zone is considered dead if its maybeAlive
      * flag is false. The maybeAlive flag is set if:
-     *   (1) the compartment has incoming cross-compartment edges, or
-     *   (2) an object in the compartment was marked during root marking, either
+     *   (1) the zone has incoming cross-compartment edges, or
+     *   (2) an object in the zone was marked during root marking, either
      *       as a black root or a gray root.
      * If the maybeAlive is false, then we set the scheduledForDestruction flag.
-     * At the end of the GC, we look for compartments where
-     * scheduledForDestruction is true. These are compartments that were somehow
-     * "revived" during the incremental GC. If any are found, we do a special,
-     * non-incremental GC of those compartments to try to collect them.
+     * At any time later in the GC, if we try to mark an object whose
+     * zone is scheduled for destruction, we will assert.
+     * NOTE: Due to bug 811587, we only assert if gcManipulatingDeadCompartments
+     * is true (e.g., if we're doing a brain transplant).
      *
-     * Compartments can be revived for a variety of reasons. On reason is bug
-     * 811587, where a reflector that was dead can be revived by DOM code that
-     * still refers to the underlying DOM node.
+     * The purpose of this check is to ensure that a zone that we would
+     * normally destroy is not resurrected by a read barrier or an
+     * allocation. This might happen during a function like JS_TransplantObject,
+     * which iterates over all compartments, live or dead, and operates on their
+     * objects. See bug 803376 for details on this problem. To avoid the
+     * problem, we are very careful to avoid allocation and read barriers during
+     * JS_TransplantObject and the like. The code here ensures that we don't
+     * regress.
      *
-     * Read barriers and allocations can also cause revival. This might happen
-     * during a function like JS_TransplantObject, which iterates over all
-     * compartments, live or dead, and operates on their objects. See bug 803376
-     * for details on this problem. To avoid the problem, we try to avoid
-     * allocation and read barriers during JS_TransplantObject and the like.
+     * Note that there are certain cases where allocations or read barriers in
+     * dead zone are difficult to avoid. We detect such cases (via the
+     * gcObjectsMarkedInDeadCompartment counter) and redo any ongoing GCs after
+     * the JS_TransplantObject function has finished. This ensures that the dead
+     * zones will be cleaned up. See AutoMarkInDeadZone and
+     * AutoMaybeTouchDeadZones for details.
      */
 
     /* Set the maybeAlive flag based on cross-compartment edges. */
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
-            const CrossCompartmentKey &key = e.front().key();
-            JSCompartment *dest;
-            switch (key.kind) {
-              case CrossCompartmentKey::ObjectWrapper:
-              case CrossCompartmentKey::DebuggerObject:
-              case CrossCompartmentKey::DebuggerSource:
-              case CrossCompartmentKey::DebuggerEnvironment:
-                dest = static_cast<JSObject *>(key.wrapped)->compartment();
-                break;
-              case CrossCompartmentKey::DebuggerScript:
-                dest = static_cast<JSScript *>(key.wrapped)->compartment();
-                break;
-              default:
-                dest = nullptr;
-                break;
-            }
-            if (dest)
-                dest->maybeAlive = true;
+            Cell *dst = e.front().key().wrapped;
+            dst->tenuredZone()->maybeAlive = true;
         }
     }
 
     /*
      * For black roots, code in gc/Marking.cpp will already have set maybeAlive
      * during MarkRuntime.
      */
 
-    for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
-        if (!c->maybeAlive && !rt->isAtomsCompartment(c))
-            c->scheduledForDestruction = true;
+    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
+        if (!zone->maybeAlive && !rt->isAtomsZone(zone))
+            zone->scheduledForDestruction = true;
     }
     foundBlackGrayEdges = false;
 
     return true;
 }
 
 template <class CompartmentIterT>
 void
@@ -4572,18 +4562,18 @@ GCRuntime::resetIncrementalGC(const char
         JS_ASSERT(!strictCompartmentChecking);
 
         break;
       }
 
       case SWEEP:
         marker.reset();
 
-        for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
-            c->scheduledForDestruction = false;
+        for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
+            zone->scheduledForDestruction = false;
 
         /* Finish sweeping the current zone group, then abort. */
         abortSweepAfterCurrentGroup = true;
         incrementalCollectSlice(SliceBudget::Unlimited, JS::gcreason::RESET, GC_NORMAL);
 
         {
             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
             rt->gc.waitBackgroundSweepOrAllocEnd();
@@ -5082,39 +5072,22 @@ GCRuntime::collect(bool incremental, int
                 gcCallback.op(rt, JSGC_END, gcCallback.data);
         }
 
         /* Need to re-schedule all zones for GC. */
         if (poked && shouldCleanUpEverything)
             JS::PrepareForFullGC(rt);
 
         /*
-         * This code makes an extra effort to collect compartments that we
-         * thought were dead at the start of the GC. See the large comment in
-         * beginMarkPhase.
-         */
-        bool repeatForDeadZone = false;
-        if (incremental && incrementalState == NO_INCREMENTAL) {
-            for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
-                if (c->scheduledForDestruction) {
-                    incremental = false;
-                    repeatForDeadZone = true;
-                    reason = JS::gcreason::COMPARTMENT_REVIVED;
-                    c->zone()->scheduleGC();
-                }
-            }
-        }
-
-        /*
          * If we reset an existing GC, we need to start a new one. Also, we
          * repeat GCs that happen during shutdown (the gcShouldCleanUpEverything
          * case) until we can be sure that no additional garbage is created
          * (which typically happens if roots are dropped during finalizers).
          */
-        repeat = (poked && shouldCleanUpEverything) || wasReset || repeatForDeadZone;
+        repeat = (poked && shouldCleanUpEverything) || wasReset;
     } while (repeat);
 
     if (incrementalState == NO_INCREMENTAL) {
 #ifdef JS_THREADSAFE
         EnqueuePendingParseTasksAfterGC(rt);
 #endif
     }
 }
@@ -5676,16 +5649,44 @@ ArenaLists::containsArena(JSRuntime *rt,
     for (ArenaHeader *aheader = arenaLists[allocKind].head(); aheader; aheader = aheader->next) {
         if (aheader == needle)
             return true;
     }
     return false;
 }
 
 
+AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSContext *cx)
+  : runtime(cx->runtime()),
+    markCount(runtime->gc.objectsMarkedInDeadZonesCount()),
+    inIncremental(JS::IsIncrementalGCInProgress(runtime)),
+    manipulatingDeadZones(runtime->gc.isManipulatingDeadZones())
+{
+    runtime->gc.setManipulatingDeadZones(true);
+}
+
+AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSObject *obj)
+  : runtime(obj->compartment()->runtimeFromMainThread()),
+    markCount(runtime->gc.objectsMarkedInDeadZonesCount()),
+    inIncremental(JS::IsIncrementalGCInProgress(runtime)),
+    manipulatingDeadZones(runtime->gc.isManipulatingDeadZones())
+{
+    runtime->gc.setManipulatingDeadZones(true);
+}
+
+AutoMaybeTouchDeadZones::~AutoMaybeTouchDeadZones()
+{
+    runtime->gc.setManipulatingDeadZones(manipulatingDeadZones);
+
+    if (inIncremental && runtime->gc.objectsMarkedInDeadZonesCount() != markCount) {
+        JS::PrepareForFullGC(runtime);
+        js::GC(runtime, GC_NORMAL, JS::gcreason::TRANSPLANT);
+    }
+}
+
 AutoSuppressGC::AutoSuppressGC(ExclusiveContext *cx)
   : suppressGC_(cx->perThreadData->suppressGC)
 {
     suppressGC_++;
 }
 
 AutoSuppressGC::AutoSuppressGC(JSCompartment *comp)
   : suppressGC_(comp->runtimeFromMainThread()->mainThread.suppressGC)
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -1285,16 +1285,22 @@ class AutoEnterOOMUnsafeRegion
     ~AutoEnterOOMUnsafeRegion() {
         OOM_maxAllocations = saved_;
     }
 };
 #else
 class AutoEnterOOMUnsafeRegion {};
 #endif /* DEBUG */
 
+// This tests whether something is inside the GGC's nursery only;
+// use sparingly. In most cases, testing for any nursery via
+// IsInsideNursery is appropriate.
+bool
+IsInsideGGCNursery(const gc::Cell *cell);
+
 } /* namespace gc */
 
 #ifdef DEBUG
 /* Use this to avoid assertions when manipulating the wrapper map. */
 class AutoDisableProxyCheck
 {
     MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
     uintptr_t &count;
--- a/js/src/jsgcinlines.h
+++ b/js/src/jsgcinlines.h
@@ -11,16 +11,43 @@
 
 #include "gc/Zone.h"
 #include "vm/ForkJoin.h"
 
 namespace js {
 
 class Shape;
 
+/*
+ * This auto class should be used around any code that might cause a mark bit to
+ * be set on an object in a dead zone. See AutoMaybeTouchDeadZones
+ * for more details.
+ */
+struct AutoMarkInDeadZone
+{
+    explicit AutoMarkInDeadZone(JS::Zone *zone)
+      : zone(zone),
+        scheduled(zone->scheduledForDestruction)
+    {
+        gc::GCRuntime &gc = zone->runtimeFromMainThread()->gc;
+        if (gc.isManipulatingDeadZones() && zone->scheduledForDestruction) {
+            gc.incObjectsMarkedInDeadZone();
+            zone->scheduledForDestruction = false;
+        }
+    }
+
+    ~AutoMarkInDeadZone() {
+        zone->scheduledForDestruction = scheduled;
+    }
+
+  private:
+    JS::Zone *zone;
+    bool scheduled;
+};
+
 inline Allocator *
 ThreadSafeContext::allocator() const
 {
     JS_ASSERT_IF(isJSContext(), &asJSContext()->zone()->allocator == allocator_);
     return allocator_;
 }
 
 template <typename T>
@@ -674,16 +701,33 @@ AllocateObjectForCacheHit(JSContext *cx,
     if (!obj && allowGC) {
         MaybeGC(cx);
         return nullptr;
     }
 
     return obj;
 }
 
+inline bool
+IsInsideGGCNursery(const js::gc::Cell *cell)
+{
+#ifdef JSGC_GENERATIONAL
+    if (!cell)
+        return false;
+    uintptr_t addr = uintptr_t(cell);
+    addr &= ~js::gc::ChunkMask;
+    addr |= js::gc::ChunkLocationOffset;
+    uint32_t location = *reinterpret_cast<uint32_t *>(addr);
+    JS_ASSERT(location != 0);
+    return location & js::gc::ChunkLocationBitNursery;
+#else
+    return false;
+#endif
+}
+
 } /* namespace gc */
 
 template <js::AllowGC allowGC>
 inline JSObject *
 NewGCObject(js::ThreadSafeContext *cx, js::gc::AllocKind kind, size_t nDynamicSlots, js::gc::InitialHeap heap)
 {
     JS_ASSERT(kind >= js::gc::FINALIZE_OBJECT0 && kind <= js::gc::FINALIZE_OBJECT_LAST);
     return js::gc::AllocateObject<allowGC>(cx, kind, nDynamicSlots, heap);
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -2500,16 +2500,19 @@ JSObject::TradeGuts(JSContext *cx, JSObj
     }
 #endif
 }
 
 /* Use this method with extreme caution. It trades the guts of two objects. */
 bool
 JSObject::swap(JSContext *cx, HandleObject a, HandleObject b)
 {
+    AutoMarkInDeadZone adc1(a->zone());
+    AutoMarkInDeadZone adc2(b->zone());
+
     // Ensure swap doesn't cause a finalizer to not be run.
     JS_ASSERT(IsBackgroundFinalized(a->tenuredGetAllocKind()) ==
               IsBackgroundFinalized(b->tenuredGetAllocKind()));
     JS_ASSERT(a->compartment() == b->compartment());
 
     unsigned r = NotifyGCPreSwap(a, b);
 
     TradeGutsReserved reserved(cx);
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -669,32 +669,17 @@ class JSObject : public js::ObjectImpl
     }
 
     void initDenseElements(uint32_t dstStart, const js::Value *src, uint32_t count) {
         JS_ASSERT(dstStart + count <= getDenseCapacity());
         memcpy(&elements[dstStart], src, count * sizeof(js::HeapSlot));
         DenseRangeWriteBarrierPost(runtimeFromMainThread(), this, dstStart, count);
     }
 
-    void initDenseElementsUnbarriered(uint32_t dstStart, const js::Value *src, uint32_t count) {
-        /*
-         * For use by parallel threads, which since they cannot see nursery
-         * things do not require a barrier.
-         */
-        JS_ASSERT(dstStart + count <= getDenseCapacity());
-#if defined(DEBUG) && defined(JSGC_GENERATIONAL)
-        JS_ASSERT(!js::gc::IsInsideNursery(this));
-        for (uint32_t index = 0; index < count; ++index) {
-            const JS::Value& value = src[index];
-            if (value.isMarkable())
-                JS_ASSERT(!js::gc::IsInsideNursery(static_cast<js::gc::Cell *>(value.toGCThing())));
-        }
-#endif
-        memcpy(&elements[dstStart], src, count * sizeof(js::HeapSlot));
-    }
+    void initDenseElementsUnbarriered(uint32_t dstStart, const js::Value *src, uint32_t count);
 
     void moveDenseElements(uint32_t dstStart, uint32_t srcStart, uint32_t count) {
         JS_ASSERT(dstStart + count <= getDenseCapacity());
         JS_ASSERT(srcStart + count <= getDenseInitializedLength());
 
         /*
          * Using memmove here would skip write barriers. Also, we need to consider
          * an array containing [A, B, C], in the following situation:
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -345,16 +345,38 @@ JSObject::ensureDenseElementsPreservePac
 inline js::Value
 JSObject::getDenseOrTypedArrayElement(uint32_t idx)
 {
     if (is<js::TypedArrayObject>())
         return as<js::TypedArrayObject>().getElement(idx);
     return getDenseElement(idx);
 }
 
+inline void
+JSObject::initDenseElementsUnbarriered(uint32_t dstStart, const js::Value *src, uint32_t count) {
+    /*
+     * For use by parallel threads, which cannot see nursery things
+     * and therefore do not require a barrier.
+     */
+    JS_ASSERT(dstStart + count <= getDenseCapacity());
+#if defined(DEBUG) && defined(JSGC_GENERATIONAL)
+    /*
+     * This asserts a global invariant: parallel code does not
+     * observe objects inside the generational GC's nursery.
+     */
+    JS_ASSERT(!js::gc::IsInsideGGCNursery(this));
+    for (uint32_t index = 0; index < count; ++index) {
+        const JS::Value& value = src[index];
+        if (value.isMarkable())
+            JS_ASSERT(!js::gc::IsInsideGGCNursery(static_cast<js::gc::Cell *>(value.toGCThing())));
+    }
+#endif
+    memcpy(&elements[dstStart], src, count * sizeof(js::HeapSlot));
+}
+
 /* static */ inline bool
 JSObject::setSingletonType(js::ExclusiveContext *cx, js::HandleObject obj)
 {
     JS_ASSERT_IF(cx->isJSContext(), !IsInsideNursery(obj));
 
     js::types::TypeObject *type = cx->getSingletonType(obj->getClass(), obj->getTaggedProto());
     if (!type)
         return false;
--- a/js/src/jswrapper.cpp
+++ b/js/src/jswrapper.cpp
@@ -39,16 +39,18 @@ Wrapper::defaultValue(JSContext *cx, Han
 }
 
 JSObject *
 Wrapper::New(JSContext *cx, JSObject *obj, JSObject *parent, Wrapper *handler,
              const WrapperOptions *options)
 {
     JS_ASSERT(parent);
 
+    AutoMarkInDeadZone amd(cx->zone());
+
     RootedValue priv(cx, ObjectValue(*obj));
     mozilla::Maybe<WrapperOptions> opts;
     if (!options) {
         opts.construct();
         opts.ref().selectDefaultClass(obj->isCallable());
         options = opts.addr();
     }
     return NewProxyObject(cx, handler, priv, options->proto(), parent, *options);
@@ -1036,16 +1038,18 @@ js::RemapAllWrappersForObject(JSContext 
 
     return true;
 }
 
 JS_FRIEND_API(bool)
 js::RecomputeWrappers(JSContext *cx, const CompartmentFilter &sourceFilter,
                       const CompartmentFilter &targetFilter)
 {
+    AutoMaybeTouchDeadZones agc(cx);
+
     AutoWrapperVector toRecompute(cx);
 
     for (CompartmentsIter c(cx->runtime(), SkipAtoms); !c.done(); c.next()) {
         // Filter by source compartment.
         if (!sourceFilter.match(c))
             continue;
 
         // Iterate over the wrappers, filtering appropriately.
--- a/js/src/jswrapper.h
+++ b/js/src/jswrapper.h
@@ -298,11 +298,39 @@ RemapAllWrappersForObject(JSContext *cx,
                           JSObject *newTarget);
 
 // API to recompute all cross-compartment wrappers whose source and target
 // match the given filters.
 JS_FRIEND_API(bool)
 RecomputeWrappers(JSContext *cx, const CompartmentFilter &sourceFilter,
                   const CompartmentFilter &targetFilter);
 
+/*
+ * This auto class should be used around any code, such as brain transplants,
+ * that may touch dead zones. Brain transplants can cause problems
+ * because they operate on all compartments, whether live or dead. A brain
+ * transplant can cause a formerly dead object to be "reanimated" by causing a
+ * read or write barrier to be invoked on it during the transplant. In this way,
+ * a zone becomes a zombie, kept alive by repeatedly consuming
+ * (transplanted) brains.
+ *
+ * To work around this issue, we observe when mark bits are set on objects in
+ * dead zones. If this happens during a brain transplant, we do a full,
+ * non-incremental GC at the end of the brain transplant. This will clean up any
+ * objects that were improperly marked.
+ */
+struct JS_FRIEND_API(AutoMaybeTouchDeadZones)
+{
+    // The version that takes an object just uses it for its runtime.
+    explicit AutoMaybeTouchDeadZones(JSContext *cx);
+    explicit AutoMaybeTouchDeadZones(JSObject *obj);
+    ~AutoMaybeTouchDeadZones();
+
+  private:
+    JSRuntime *runtime;
+    unsigned markCount;
+    bool inIncremental;
+    bool manipulatingDeadZones;
+};
+
 } /* namespace js */
 
 #endif /* jswrapper_h */
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -2088,17 +2088,17 @@ Debugger::addAllGlobalsAsDebuggees(JSCon
     for (ZonesIter zone(cx->runtime(), SkipAtoms); !zone.done(); zone.next()) {
         // Invalidate a zone at a time to avoid doing a ZoneCellIter
         // per compartment.
         AutoDebugModeInvalidation invalidate(zone);
 
         for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
             if (c == dbg->object->compartment() || c->options().invisibleToDebugger())
                 continue;
-            c->scheduledForDestruction = false;
+            c->zone()->scheduledForDestruction = false;
             GlobalObject *global = c->maybeGlobal();
             if (global) {
                 Rooted<GlobalObject*> rg(cx, global);
                 if (!dbg->addDebuggeeGlobal(cx, rg, invalidate))
                     return false;
             }
         }
     }
@@ -2885,17 +2885,17 @@ Debugger::findAllGlobals(JSContext *cx, 
     RootedObject result(cx, NewDenseEmptyArray(cx));
     if (!result)
         return false;
 
     for (CompartmentsIter c(cx->runtime(), SkipAtoms); !c.done(); c.next()) {
         if (c->options().invisibleToDebugger())
             continue;
 
-        c->scheduledForDestruction = false;
+        c->zone()->scheduledForDestruction = false;
 
         GlobalObject *global = c->maybeGlobal();
 
         if (cx->runtime()->isSelfHostingGlobal(global))
             continue;
 
         if (global) {
             /*
--- a/js/src/vm/ProxyObject.cpp
+++ b/js/src/vm/ProxyObject.cpp
@@ -72,17 +72,24 @@ void
 ProxyObject::initHandler(BaseProxyHandler *handler)
 {
     initSlot(HANDLER_SLOT, PrivateValue(handler));
 }
 
 static void
 NukeSlot(ProxyObject *proxy, uint32_t slot)
 {
-    proxy->setReservedSlot(slot, NullValue());
+    Value old = proxy->getSlot(slot);
+    if (old.isMarkable()) {
+        Zone *zone = ZoneOfValue(old);
+        AutoMarkInDeadZone amd(zone);
+        proxy->setReservedSlot(slot, NullValue());
+    } else {
+        proxy->setReservedSlot(slot, NullValue());
+    }
 }
 
 void
 ProxyObject::nuke(BaseProxyHandler *handler)
 {
     /* Allow people to add their own number of reserved slots beyond the expected 4 */
     unsigned numSlots = JSCLASS_RESERVED_SLOTS(getClass());
     for (unsigned i = 0; i < numSlots; i++)
--- a/js/src/vm/StructuredClone.cpp
+++ b/js/src/vm/StructuredClone.cpp
@@ -569,17 +569,17 @@ SCInput::getPtr(const uint64_t *p, void 
 }
 
 bool
 SCInput::readPtr(void **p)
 {
     uint64_t u;
     if (!readNativeEndian(&u))
         return false;
-    *p = reinterpret_cast<void*>(u);
+    *p = reinterpret_cast<void*>(NativeEndian::swapFromLittleEndian(u));
     return true;
 }
 
 SCOutput::SCOutput(JSContext *cx) : cx(cx), buf(cx) {}
 
 bool
 SCOutput::write(uint64_t u)
 {
--- a/js/xpconnect/src/XPCWrappedNative.cpp
+++ b/js/xpconnect/src/XPCWrappedNative.cpp
@@ -351,16 +351,19 @@ XPCWrappedNative::GetNewOrUsed(xpcObject
 
     RootedObject parent(cx, Scope->GetGlobalJSObject());
 
     RootedValue newParentVal(cx, NullValue());
 
     mozilla::Maybe<JSAutoCompartment> ac;
 
     if (sciWrapper.GetFlags().WantPreCreate()) {
+        // PreCreate may touch dead compartments.
+        js::AutoMaybeTouchDeadZones agc(parent);
+
         RootedObject plannedParent(cx, parent);
         nsresult rv = sciWrapper.GetCallback()->PreCreate(identity, cx,
                                                           parent, parent.address());
         if (NS_FAILED(rv))
             return rv;
         rv = NS_OK;
 
         MOZ_ASSERT(!xpc::WrapperFactory::IsXrayWrapper(parent),
@@ -1277,16 +1280,19 @@ RescueOrphans(HandleObject obj)
     // NB: We pass stopAtOuter=false during the unwrap because Location objects
     // are parented to outer window proxies.
     nsresult rv;
     RootedObject parentObj(cx, js::GetObjectParent(obj));
     if (!parentObj)
         return NS_OK; // Global object. We're done.
     parentObj = js::UncheckedUnwrap(parentObj, /* stopAtOuter = */ false);
 
+    // PreCreate may touch dead compartments.
+    js::AutoMaybeTouchDeadZones agc(parentObj);
+
     // Recursively fix up orphans on the parent chain.
     rv = RescueOrphans(parentObj);
     NS_ENSURE_SUCCESS(rv, rv);
 
     // Now that we know our parent is in the right place, determine if we've
     // been orphaned. If not, we have nothing to do.
     if (!js::IsCrossCompartmentWrapper(parentObj))
         return NS_OK;
--- a/layout/reftests/text-svgglyphs/reftest.list
+++ b/layout/reftests/text-svgglyphs/reftest.list
@@ -5,19 +5,19 @@ pref(gfx.font_rendering.opentype_svg.ena
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-positioning.svg svg-glyph-positioning-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-html.html svg-glyph-html-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-direct.svg svg-glyph-direct-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-invalid.html svg-glyph-invalid-ref.html
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-objectfill-solid.svg svg-glyph-objectfill-solid-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-objectstroke-solid.svg svg-glyph-objectstroke-solid-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    fuzzy(1,6) == svg-glyph-objectgradient.svg svg-glyph-objectgradient-ref.svg # see bug 871961#c5
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-objectgradient-zoom.svg svg-glyph-objectgradient-zoom-ref.svg
-pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-objectpattern.svg svg-glyph-objectpattern-ref.svg
+pref(gfx.font_rendering.opentype_svg.enabled,true)    fuzzy-if(gtk2Widget,1,1438) fuzzy-if(winWidget,1,1954) fuzzy-if(Android||B2G,8,3795) == svg-glyph-objectpattern.svg svg-glyph-objectpattern-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == clip.html clip-ref.html
 pref(gfx.font_rendering.opentype_svg.enabled,true)    fuzzy(1,12) == svg-glyph-objectopacity.svg svg-glyph-objectopacity-ref.svg # see bug 871961#c5
-pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-objectopacity2.svg svg-glyph-objectopacity2-ref.svg
+pref(gfx.font_rendering.opentype_svg.enabled,true)    fuzzy-if(gtk2Widget,1,2268) fuzzy-if(winWidget,1,3074) fuzzy-if(Android||B2G,5,4715) == svg-glyph-objectopacity2.svg svg-glyph-objectopacity2-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-paintnone.svg svg-glyph-paintnone-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-cachedopacity.svg svg-glyph-cachedopacity-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    fuzzy-if(cocoaWidget,255,100) == svg-glyph-objectvalue.svg svg-glyph-objectvalue-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    fails == svg-glyph-mask.svg svg-glyph-mask-ref.svg # bug 872483
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-paint-server.svg svg-glyph-paint-server-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    == svg-glyph-transform.svg svg-glyph-transform-ref.svg
 pref(gfx.font_rendering.opentype_svg.enabled,true)    random-if(B2G&&browserIsRemote) == svg-glyph-extents.html svg-glyph-extents-ref.html
--- a/layout/svg/nsSVGPatternFrame.cpp
+++ b/layout/svg/nsSVGPatternFrame.cpp
@@ -174,36 +174,36 @@ IncludeBBoxScale(const nsSVGViewBox& aVi
   return (!aViewBox.IsExplicitlySet() &&
           aPatternContentUnits == SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) ||
          (aViewBox.IsExplicitlySet() &&
           aPatternUnits == SVG_UNIT_TYPE_OBJECTBOUNDINGBOX);
 }
 
 // Given the matrix for the pattern element's own transform, this returns a
 // combined matrix including the transforms applicable to its target.
-static gfxMatrix
+static Matrix
 GetPatternMatrix(uint16_t aPatternUnits,
-                 const gfxMatrix &patternTransform,
+                 const Matrix &patternTransform,
                  const gfxRect &bbox,
                  const gfxRect &callerBBox,
                  const Matrix &callerCTM)
 {
   // We really want the pattern matrix to handle translations
   gfxFloat minx = bbox.X();
   gfxFloat miny = bbox.Y();
 
   if (aPatternUnits == SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
     minx += callerBBox.X();
     miny += callerBBox.Y();
   }
 
   float scale = 1.0f / MaxExpansion(callerCTM);
-  gfxMatrix patternMatrix = patternTransform;
+  Matrix patternMatrix = patternTransform;
   patternMatrix.Scale(scale, scale);
-  patternMatrix.Translate(gfxPoint(minx, miny));
+  patternMatrix.Translate(minx, miny);
 
   return patternMatrix;
 }
 
 static nsresult
 GetTargetGeometry(gfxRect *aBBox,
                   const nsSVGViewBox &aViewBox,
                   uint16_t aPatternContentUnits,
@@ -225,42 +225,41 @@ GetTargetGeometry(gfxRect *aBBox,
   float scale = MaxExpansion(aContextMatrix);
   if (scale <= 0) {
     return NS_ERROR_FAILURE;
   }
   aBBox->Scale(scale);
   return NS_OK;
 }
 
-nsresult
-nsSVGPatternFrame::PaintPattern(gfxASurface** surface,
-                                gfxMatrix* patternMatrix,
-                                const gfxMatrix &aContextMatrix,
+TemporaryRef<SourceSurface>
+nsSVGPatternFrame::PaintPattern(Matrix* patternMatrix,
+                                const Matrix &aContextMatrix,
                                 nsIFrame *aSource,
                                 nsStyleSVGPaint nsStyleSVG::*aFillOrStroke,
                                 float aGraphicOpacity,
                                 const gfxRect *aOverrideBounds)
 {
   /*
    * General approach:
    *    Set the content geometry stuff
    *    Calculate our bbox (using x,y,width,height & patternUnits &
    *                        patternTransform)
    *    Create the surface
    *    Calculate the content transformation matrix
    *    Get our children (we may need to get them from another Pattern)
    *    Call SVGPaint on all of our children
    *    Return
    */
-  *surface = nullptr;
 
   // Get the first child of the pattern data we will render
   nsIFrame* firstKid = GetPatternFirstChild();
-  if (!firstKid)
-    return NS_ERROR_FAILURE; // Either no kids or a bad reference
+  if (!firstKid) {
+    return nullptr; // Either no kids or a bad reference
+  }
 
   const nsSVGViewBox& viewBox = GetViewBox();
 
   uint16_t patternContentUnits =
     GetEnumValue(SVGPatternElement::PATTERNCONTENTUNITS);
   uint16_t patternUnits =
     GetEnumValue(SVGPatternElement::PATTERNUNITS);
 
@@ -285,73 +284,80 @@ nsSVGPatternFrame::PaintPattern(gfxASurf
 
   // Get all of the information we need from our "caller" -- i.e.
   // the geometry that is being rendered with a pattern
   gfxRect callerBBox;
   if (NS_FAILED(GetTargetGeometry(&callerBBox,
                                   viewBox,
                                   patternContentUnits, patternUnits,
                                   aSource,
-                                  ToMatrix(aContextMatrix),
-                                  aOverrideBounds)))
-    return NS_ERROR_FAILURE;
+                                  aContextMatrix,
+                                  aOverrideBounds))) {
+    return nullptr;
+  }
 
   // Construct the CTM that we will provide to our children when we
   // render them into the tile.
   gfxMatrix ctm = ConstructCTM(viewBox, patternContentUnits, patternUnits,
-                               callerBBox, ToMatrix(aContextMatrix), aSource);
+                               callerBBox, aContextMatrix, aSource);
   if (ctm.IsSingular()) {
-    return NS_ERROR_FAILURE;
+    return nullptr;
   }
 
   // Get the pattern we are going to render
   nsSVGPatternFrame *patternFrame =
     static_cast<nsSVGPatternFrame*>(firstKid->GetParent());
   if (patternFrame->mCTM) {
     *patternFrame->mCTM = ctm;
   } else {
     patternFrame->mCTM = new gfxMatrix(ctm);
   }
 
   // Get the bounding box of the pattern.  This will be used to determine
   // the size of the surface, and will also be used to define the bounding
   // box for the pattern tile.
-  gfxRect bbox = GetPatternRect(patternUnits, callerBBox, ToMatrix(aContextMatrix), aSource);
+  gfxRect bbox = GetPatternRect(patternUnits, callerBBox, aContextMatrix, aSource);
   if (bbox.Width() <= 0.0 || bbox.Height() <= 0.0) {
-    return NS_ERROR_FAILURE;
+    return nullptr;
   }
 
   // Get the pattern transform
-  gfxMatrix patternTransform = GetPatternTransform();
+  Matrix patternTransform = ToMatrix(GetPatternTransform());
 
   // revert the vector effect transform so that the pattern appears unchanged
   if (aFillOrStroke == &nsStyleSVG::mStroke) {
-    patternTransform.Multiply(nsSVGUtils::GetStrokeTransform(aSource).Invert());
+    Matrix strokeTransform = ToMatrix(nsSVGUtils::GetStrokeTransform(aSource).Invert());
+    if (strokeTransform.IsSingular()) {
+      NS_WARNING("Should we get here if the stroke transform is singular?");
+      return nullptr;
+    }
+    patternTransform *= strokeTransform;
   }
 
   // Get the transformation matrix that we will hand to the renderer's pattern
   // routine.
   *patternMatrix = GetPatternMatrix(patternUnits, patternTransform,
-                                    bbox, callerBBox, ToMatrix(aContextMatrix));
+                                    bbox, callerBBox, aContextMatrix);
   if (patternMatrix->IsSingular()) {
-    return NS_ERROR_FAILURE;
+    return nullptr;
   }
 
   // Now that we have all of the necessary geometries, we can
   // create our surface.
-  gfxRect transformedBBox = patternTransform.TransformBounds(bbox);
+  gfxRect transformedBBox = ThebesRect(patternTransform.TransformBounds(ToRect(bbox)));
 
   bool resultOverflows;
   IntSize surfaceSize =
     nsSVGUtils::ConvertToSurfaceSize(
       transformedBBox.Size(), &resultOverflows).ToIntSize();
 
   // 0 disables rendering, < 0 is an error
-  if (surfaceSize.width <= 0 || surfaceSize.height <= 0)
-    return NS_ERROR_FAILURE;
+  if (surfaceSize.width <= 0 || surfaceSize.height <= 0) {
+    return nullptr;
+  }
 
   gfxFloat patternWidth = bbox.Width();
   gfxFloat patternHeight = bbox.Height();
 
   if (resultOverflows ||
       patternWidth != surfaceSize.width ||
       patternHeight != surfaceSize.height) {
     // scale drawing to pattern surface size
@@ -361,24 +367,25 @@ nsSVGPatternFrame::PaintPattern(gfxASurf
                 0.0f, 0.0f);
     patternFrame->mCTM->PreMultiply(tempTM);
 
     // and rescale pattern to compensate
     patternMatrix->Scale(patternWidth / surfaceSize.width,
                          patternHeight / surfaceSize.height);
   }
 
-  nsRefPtr<gfxASurface> tmpSurface =
-    gfxPlatform::GetPlatform()->CreateOffscreenSurface(surfaceSize,
-                                                       gfxContentType::COLOR_ALPHA);
-  if (!tmpSurface || tmpSurface->CairoStatus())
-    return NS_ERROR_FAILURE;
+  RefPtr<DrawTarget> dt =
+    gfxPlatform::GetPlatform()->
+      CreateOffscreenContentDrawTarget(surfaceSize,  SurfaceFormat::B8G8R8A8);
+  if (!dt) {
+    return nullptr;
+  }
 
   nsRefPtr<nsRenderingContext> context(new nsRenderingContext());
-  context->Init(aSource->PresContext()->DeviceContext(), tmpSurface);
+  context->Init(aSource->PresContext()->DeviceContext(), dt);
   gfxContext* gfx = context->ThebesContext();
 
   // Fill with transparent black
   gfx->SetOperator(gfxContext::OPERATOR_CLEAR);
   gfx->Paint();
   gfx->SetOperator(gfxContext::OPERATOR_OVER);
 
   if (aGraphicOpacity != 1.0f) {
@@ -415,18 +422,17 @@ nsSVGPatternFrame::PaintPattern(gfxASurf
 
   if (aGraphicOpacity != 1.0f) {
     gfx->PopGroupToSource();
     gfx->Paint(aGraphicOpacity);
     gfx->Restore();
   }
 
   // caller now owns the surface
-  tmpSurface.forget(surface);
-  return NS_OK;
+  return dt->Snapshot();
 }
 
 /* Will probably need something like this... */
 // How do we handle the insertion of a new frame?
 // We really don't want to rerender this every time,
 // do we?
 nsIFrame*
 nsSVGPatternFrame::GetPatternFirstChild()
@@ -702,33 +708,30 @@ nsSVGPatternFrame::GetPaintServerPattern
                                          const gfxRect *aOverrideBounds)
 {
   if (aGraphicOpacity == 0.0f) {
     nsRefPtr<gfxPattern> pattern = new gfxPattern(gfxRGBA(0, 0, 0, 0));
     return pattern.forget();
   }
 
   // Paint it!
-  nsRefPtr<gfxASurface> surface;
-  gfxMatrix pMatrix;
-  nsresult rv = PaintPattern(getter_AddRefs(surface), &pMatrix, aContextMatrix,
-                             aSource, aFillOrStroke, aGraphicOpacity, aOverrideBounds);
+  Matrix pMatrix;
+  RefPtr<SourceSurface> surface =
+    PaintPattern(&pMatrix, ToMatrix(aContextMatrix), aSource, aFillOrStroke,
+                 aGraphicOpacity, aOverrideBounds);
 
-  if (NS_FAILED(rv)) {
+  if (!surface) {
     return nullptr;
   }
 
-  pMatrix.Invert();
-
-  nsRefPtr<gfxPattern> pattern = new gfxPattern(surface);
+  nsRefPtr<gfxPattern> pattern = new gfxPattern(surface, pMatrix);
 
   if (!pattern || pattern->CairoStatus())
     return nullptr;
 
-  pattern->SetMatrix(pMatrix);
   pattern->SetExtend(gfxPattern::EXTEND_REPEAT);
   return pattern.forget();
 }
 
 // -------------------------------------------------------------------------
 // Public functions
 // -------------------------------------------------------------------------
 
--- a/layout/svg/nsSVGPatternFrame.h
+++ b/layout/svg/nsSVGPatternFrame.h
@@ -4,16 +4,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef __NS_SVGPATTERNFRAME_H__
 #define __NS_SVGPATTERNFRAME_H__
 
 #include "mozilla/Attributes.h"
 #include "gfxMatrix.h"
 #include "mozilla/gfx/2D.h"
+#include "mozilla/RefPtr.h"
 #include "nsSVGPaintServerFrame.h"
 
 class gfxASurface;
 class gfxContext;
 class nsIFrame;
 class nsSVGElement;
 class nsSVGLength2;
 class nsSVGPathGeometryFrame;
@@ -27,16 +28,18 @@ class nsSVGAnimatedTransformList;
 typedef nsSVGPaintServerFrame  nsSVGPatternFrameBase;
 
 /**
  * Patterns can refer to other patterns. We create an nsSVGPaintingProperty
  * with property type nsGkAtoms::href to track the referenced pattern.
  */
 class nsSVGPatternFrame : public nsSVGPatternFrameBase
 {
+  typedef mozilla::gfx::SourceSurface SourceSurface;
+
 public:
   NS_DECL_FRAMEARENA_HELPERS
 
   friend nsIFrame* NS_NewSVGPatternFrame(nsIPresShell* aPresShell,
                                          nsStyleContext* aContext);
 
   nsSVGPatternFrame(nsStyleContext* aContext);
 
@@ -104,23 +107,23 @@ protected:
     return GetPreserveAspectRatio(mContent);
   }
   const nsSVGLength2 *GetLengthValue(uint32_t aIndex, nsIContent *aDefault);
   const nsSVGLength2 *GetLengthValue(uint32_t aIndex)
   {
     return GetLengthValue(aIndex, mContent);
   }
 
-  nsresult PaintPattern(gfxASurface **surface,
-                        gfxMatrix *patternMatrix,
-                        const gfxMatrix &aContextMatrix,
-                        nsIFrame *aSource,
-                        nsStyleSVGPaint nsStyleSVG::*aFillOrStroke,
-                        float aGraphicOpacity,
-                        const gfxRect *aOverrideBounds);
+  mozilla::TemporaryRef<SourceSurface>
+  PaintPattern(Matrix *patternMatrix,
+               const Matrix &aContextMatrix,
+               nsIFrame *aSource,
+               nsStyleSVGPaint nsStyleSVG::*aFillOrStroke,
+               float aGraphicOpacity,
+               const gfxRect *aOverrideBounds);
   nsIFrame*  GetPatternFirstChild();
   gfxRect    GetPatternRect(uint16_t aPatternUnits,
                             const gfxRect &bbox,
                             const Matrix &callerCTM,
                             nsIFrame *aTarget);
   gfxMatrix  ConstructCTM(const nsSVGViewBox& aViewBox,
                           uint16_t aPatternContentUnits,
                           uint16_t aPatternUnits,
--- a/media/mtransport/test/transport_unittests.cpp
+++ b/media/mtransport/test/transport_unittests.cpp
@@ -15,16 +15,17 @@
 #include "logging.h"
 #include "nspr.h"
 #include "nss.h"
 #include "ssl.h"
 
 #include "nsThreadUtils.h"
 #include "nsXPCOM.h"
 
+#include "databuffer.h"
 #include "dtlsidentity.h"
 #include "nricectx.h"
 #include "nricemediastream.h"
 #include "transportflow.h"
 #include "transportlayer.h"
 #include "transportlayerdtls.h"
 #include "transportlayerice.h"
 #include "transportlayerlog.h"
@@ -37,16 +38,30 @@
 #include "gtest/gtest.h"
 #include "gtest_utils.h"
 
 using namespace mozilla;
 MOZ_MTLOG_MODULE("mtransport")
 
 MtransportTestUtils *test_utils;
 
+
+const uint8_t kTlsChangeCipherSpecType = 0x14;
+const uint8_t kTlsHandshakeType =        0x16;
+
+const uint8_t kTlsHandshakeCertificate = 0x0b;
+
+const uint8_t kTlsFakeChangeCipherSpec[] = {
+  kTlsChangeCipherSpecType,  // Type
+  0xfe, 0xff, // Version
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, // Fictitious sequence #
+  0x00, 0x01, // Length
+  0x01 // Value
+};
+
 // Layer class which can't be initialized.
 class TransportLayerDummy : public TransportLayer {
  public:
   TransportLayerDummy(bool allow_init, bool *destroyed)
       : allow_init_(allow_init),
         destroyed_(destroyed) {
     *destroyed_ = false;
   }
@@ -66,40 +81,57 @@ class TransportLayerDummy : public Trans
 
   TRANSPORT_LAYER_ID("lossy")
 
  private:
   bool allow_init_;
   bool *destroyed_;
 };
 
+class TransportLayerLossy;
+
+class Inspector {
+ public:
+  virtual ~Inspector() {}
+
+  virtual void Inspect(TransportLayer* layer,
+                       const unsigned char *data, size_t len) = 0;  // invoked by TransportLayerLossy for each packet it forwards
+};
+
 // Class to simulate various kinds of network lossage
 class TransportLayerLossy : public TransportLayer {
  public:
-  TransportLayerLossy() : loss_mask_(0), packet_(0) {}
+  TransportLayerLossy() : loss_mask_(0), packet_(0), inspector_(nullptr) {}
   ~TransportLayerLossy () {}
 
   virtual TransportResult SendPacket(const unsigned char *data, size_t len) {
     MOZ_MTLOG(ML_NOTICE, LAYER_INFO << "SendPacket(" << len << ")");
 
     if (loss_mask_ & (1 << (packet_ % 32))) {
       MOZ_MTLOG(ML_NOTICE, "Dropping packet");
       ++packet_;
       return len;
     }
+    if (inspector_) {
+      inspector_->Inspect(this, data, len);  // hand the outgoing packet to the inspector before forwarding
+    }
 
     ++packet_;
 
     return downward_->SendPacket(data, len);
   }
 
   void SetLoss(uint32_t packet) {
     loss_mask_ |= (1 << (packet & 32));
   }
 
+  void SetInspector(Inspector* inspector) {
+    inspector_ = inspector;  // ScopedDeletePtr member takes ownership
+  }
+
   void StateChange(TransportLayer *layer, State state) {
     TL_SET_STATE(state);
   }
 
   void PacketReceived(TransportLayer *layer, const unsigned char *data,
                       size_t len) {
     SignalPacketReceived(this, data, len);
   }
@@ -116,16 +148,133 @@ class TransportLayerLossy : public Trans
                 &TransportLayerLossy::StateChange);
 
     TL_SET_STATE(downward_->state());
   }
 
  private:
   uint32_t loss_mask_;
   uint32_t packet_;
+  ScopedDeletePtr<Inspector> inspector_;  // owned; nullptr until SetInspector()
+};
+
+// Process DTLS Records
+#define CHECK_LENGTH(expected) \
+  do { \
+    EXPECT_GE(remaining(), expected); \
+    if (remaining() < expected) return false; \
+  } while(0)
+
+class DtlsRecordParser {
+ public:
+  DtlsRecordParser(const unsigned char *data, size_t len)
+      : buffer_(data, len), offset_(0) {}
+
+  bool NextRecord(uint8_t* ct, RefPtr<DataBuffer>* buffer) {  // false on end-of-input or truncated record
+    if (!remaining())
+      return false;
+
+    CHECK_LENGTH(13U);  // full DTLS record header
+    const uint8_t *ctp = reinterpret_cast<const uint8_t *>(ptr());
+    consume(11); // ct (1) + version (2) + epoch/sequence (8); length is read next
+
+    const uint16_t *tmp = reinterpret_cast<const uint16_t*>(ptr());
+    size_t length = ntohs(*tmp);  // payload length, network byte order
+    consume(2);
+
+    CHECK_LENGTH(length);
+    DataBuffer* db = new DataBuffer(ptr(), length);
+    consume(length);
+
+    *ct = *ctp;
+    *buffer = db;
+
+    return true;
+  }
+
+ private:
+  size_t remaining() const { return buffer_.len() - offset_; }
+  const uint8_t *ptr() const { return buffer_.data() + offset_; }
+  void consume(size_t len) { offset_ += len; }
+
+
+  DataBuffer buffer_;
+  size_t offset_;
+};
+
+
+// Inspector that parses out DTLS records and passes
+// them on.
+class DtlsRecordInspector : public Inspector {
+ public:
+  virtual void Inspect(TransportLayer* layer,
+                       const unsigned char *data, size_t len) {
+    DtlsRecordParser parser(data, len);
+
+    uint8_t ct;
+    RefPtr<DataBuffer> buf;
+    while(parser.NextRecord(&ct, &buf)) {
+      OnRecord(layer, ct, buf->data(), buf->len());
+    }
+  }
+
+  virtual void OnRecord(TransportLayer* layer,
+                        uint8_t content_type,
+                        const unsigned char *record,
+                        size_t len) = 0;  // one call per complete DTLS record in the packet
+};
+
+// Inspector that injects a canned packet the first time it sees a
+// DTLS record matching the requested content/handshake type.
+class DtlsInspectorInjector : public DtlsRecordInspector {
+ public:
+  DtlsInspectorInjector(uint8_t packet_type, uint8_t handshake_type,
+                    const unsigned char *data, size_t len) :
+      packet_type_(packet_type),
+      handshake_type_(handshake_type),
+      injected_(false) {
+    data_ = new unsigned char[len];
+    memcpy(data_, data, len);
+    len_ = len;
+  }
+
+  virtual void OnRecord(TransportLayer* layer,
+                        uint8_t content_type,
+                        const unsigned char *data, size_t len) {
+    // Only inject once.
+    if (injected_) {
+      return;
+    }
+
+    // Check that the first byte is as requested.
+    if (content_type != packet_type_) {
+      return;
+    }
+
+    if (handshake_type_ != 0xff) {  // 0xff acts as a wildcard handshake type
+      // Check that the packet is plausibly long enough.
+      if (len < 1) {
+        return;
+      }
+
+      // Check that the handshake type is as requested.
+      if (data[0] != handshake_type_) {
+        return;
+      }
+    }
+    injected_ = true;  // mark before sending so we really inject only once
+    layer->SendPacket(data_, len_);
+  }
+
+ private:
+  uint8_t packet_type_;
+  uint8_t handshake_type_;
+  bool injected_;
+  ScopedDeleteArray<unsigned char> data_;
+  size_t len_;
 };
 
 namespace {
 class TransportTestPeer : public sigslot::has_slots<> {
  public:
   TransportTestPeer(nsCOMPtr<nsIEventTarget> target, std::string name)
       : name_(name), target_(target),
         received_(0), flow_(new TransportFlow(name)),
@@ -357,16 +506,20 @@ class TransportTestPeer : public sigslot
     std::cerr << "Received " << len << " bytes" << std::endl;
     ++received_;
   }
 
   void SetLoss(uint32_t loss) {
     lossy_->SetLoss(loss);
   }
 
+  void SetInspector(Inspector* inspector) {
+    lossy_->SetInspector(inspector);
+  }
+
   TransportLayer::State state() {
     TransportLayer::State tstate;
 
     RUN_ON_THREAD(test_utils->sts_target(),
                   WrapRunnableRet(flow_, &TransportFlow::state, &tstate));
 
     return tstate;
   }
@@ -545,16 +698,27 @@ TEST_F(TransportTest, TestConnectTwoDige
 }
 
 TEST_F(TransportTest, TestConnectTwoDigestsBothBad) {
   SetDtlsPeer(2, 3);
 
   ConnectSocketExpectFail();
 }
 
+TEST_F(TransportTest, TestConnectInjectCCS) {
+  SetDtlsPeer();
+  p2_->SetInspector(new DtlsInspectorInjector(  // inject a fake CCS when p2 sends a Certificate handshake record
+      kTlsHandshakeType,
+      kTlsHandshakeCertificate,
+      kTlsFakeChangeCipherSpec,
+      sizeof(kTlsFakeChangeCipherSpec)));
+
+  ConnectSocket();  // connection is expected to still succeed despite the bogus CCS
+}
+
 TEST_F(TransportTest, TestTransfer) {
   SetDtlsPeer();
   ConnectSocket();
   TransferTest(1);
 }
 
 TEST_F(TransportTest, TestConnectLoseFirst) {
   SetDtlsPeer();
--- a/mobile/android/base/tests/robocop.ini
+++ b/mobile/android/base/tests/robocop.ini
@@ -62,18 +62,16 @@ skip-if = android_version == "10"
 [testOverscroll]
 [testPanCorrectness]
 # disabled on x86 only; bug 927476
 skip-if = processor == "x86"
 # [testPasswordEncrypt] # see bug 824067
 [testPasswordProvider]
 # [testPermissions] # see bug 757475
 [testPictureLinkContextMenu]
-# disabled on 2.3; bug 986164
-skip-if = android_version == "10"
 [testPrefsObserver]
 [testPrivateBrowsing]
 [testPromptGridInput]
 # disabled on x86 only; bug 957185
 skip-if = processor == "x86"
 # [testReaderMode] # see bug 913254, 936224
 [testReadingListProvider]
 [testSearchSuggestions]
--- a/moz.build
+++ b/moz.build
@@ -34,26 +34,25 @@ if not CONFIG['LIBXUL_SDK']:
         if not CONFIG['MOZ_NATIVE_ZLIB']:
             add_tier_dir('base', ['modules/zlib'])
 
         add_tier_dir('base', ['mozglue', 'memory/mozalloc'])
 
 if not CONFIG['JS_STANDALONE']:
     add_tier_dir('precompile', 'xpcom/xpidl')
 
-    if CONFIG['COMPILE_ENVIRONMENT'] and not CONFIG['LIBXUL_SDK']:
-        if not CONFIG['MOZ_NATIVE_NSPR']:
-            add_tier_dir('nspr', 'config/nspr')
+if CONFIG['COMPILE_ENVIRONMENT'] and not CONFIG['LIBXUL_SDK']:
+    if CONFIG['MOZ_BUILD_NSPR']:
+        add_tier_dir('nspr', 'config/nspr')
 
+    if not CONFIG['JS_STANDALONE']:
         add_tier_dir('external', 'config/external')
-
         if not CONFIG['MOZ_NATIVE_NSS']:
              add_tier_dir('nss', 'security/build')
 
-if CONFIG['COMPILE_ENVIRONMENT'] and not CONFIG['LIBXUL_SDK']:
     if CONFIG['BUILD_CTYPES'] and not CONFIG['MOZ_NATIVE_FFI']:
         add_tier_dir('js', ['js/src/ctypes/libffi'], static=True)
     add_tier_dir('js', ['intl/icu'], static=True)
     CONFIGURE_SUBST_FILES += ['intl/icu/Makefile']
     add_tier_dir('js', ['js/src'])
 
 if not CONFIG['JS_STANDALONE']:
     # Bring in the configuration for the configured application.
--- a/netwerk/protocol/http/nsHttpConnectionMgr.cpp
+++ b/netwerk/protocol/http/nsHttpConnectionMgr.cpp
@@ -28,16 +28,18 @@
 #include "NullHttpTransaction.h"
 #include "nsITransport.h"
 #include "nsISocketTransportService.h"
 #include <algorithm>
 #include "Http2Compression.h"
 #include "mozilla/ChaosMode.h"
 #include "mozilla/unused.h"
 
+#include "mozilla/Telemetry.h"
+
 // defined by the socket transport service while active
 extern PRThread *gSocketThread;
 
 namespace mozilla {
 namespace net {
 
 //-----------------------------------------------------------------------------
 
@@ -1368,16 +1370,18 @@ nsHttpConnectionMgr::MakeNewConnection(n
             // open list. Remove the speculative bit from it and that
             // connection can later be used for this transaction
             // (or another one in the pending queue) - we don't
             // need to open a new connection here.
             LOG(("nsHttpConnectionMgr::MakeNewConnection [ci = %s]\n"
                  "Found a speculative half open connection\n",
                  ent->mConnInfo->HashKey().get()));
             ent->mHalfOpens[i]->SetSpeculative(false);
+            Telemetry::AutoCounter<Telemetry::HTTPCONNMGR_USED_SPECULATIVE_CONN> usedSpeculativeConn;
+            ++usedSpeculativeConn;
 
             // return OK because we have essentially opened a new connection
             // by converting a speculative half-open to general use
             return NS_OK;
         }
     }
 
     // If this host is trying to negotiate a SPDY session right now,
@@ -2023,18 +2027,22 @@ nsresult
 nsHttpConnectionMgr::CreateTransport(nsConnectionEntry *ent,
                                      nsAHttpTransaction *trans,
                                      uint32_t caps,
                                      bool speculative)
 {
     MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
 
     nsRefPtr<nsHalfOpenSocket> sock = new nsHalfOpenSocket(ent, trans, caps);
-    if (speculative)
+    if (speculative) {
         sock->SetSpeculative(true);
+        Telemetry::AutoCounter<Telemetry::HTTPCONNMGR_TOTAL_SPECULATIVE_CONN> totalSpeculativeConn;
+        ++totalSpeculativeConn;
+    }
+
     nsresult rv = sock->SetupPrimaryStreams();
     NS_ENSURE_SUCCESS(rv, rv);
 
     ent->mHalfOpens.AppendElement(sock);
     mNumHalfOpenConns++;
     return NS_OK;
 }
 
@@ -2998,16 +3006,21 @@ nsHttpConnectionMgr::nsHalfOpenSocket::C
     LOG(("nsHalfOpenSocket::CancelBackupTimer()"));
     mSynTimer->Cancel();
     mSynTimer = nullptr;
 }
 
 void
 nsHttpConnectionMgr::nsHalfOpenSocket::Abandon()
 {
+    if (IsSpeculative()) {
+      Telemetry::AutoCounter<Telemetry::HTTPCONNMGR_UNUSED_SPECULATIVE_CONN> unusedSpeculativeConn;
+      ++unusedSpeculativeConn;
+    }
+
     LOG(("nsHalfOpenSocket::Abandon [this=%p ent=%s]",
          this, mEnt->mConnInfo->Host()));
 
     MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
 
     nsRefPtr<nsHalfOpenSocket> deleteProtector(this);
 
     // Tell socket (and backup socket) to forget the half open socket.
--- a/services/fxaccounts/FxAccounts.jsm
+++ b/services/fxaccounts/FxAccounts.jsm
@@ -198,17 +198,17 @@ AccountState.prototype = {
   reject: function(error) {
     // It could be argued that we should just let it reject with the original
     // error - but this runs the risk of the error being (eg) a 401, which
     // might cause the consumer to attempt some remediation and cause other
     // problems.
     if (!this.isCurrent) {
       log.info("An accountState promise was rejected, but we are ignoring that" +
                "reason and rejecting it due to a different user being signed in." +
-               "Originally rejected with: " + reason);
+               "Originally rejected with: " + error);
       return Promise.reject(new Error("A different user signed in"));
     }
     return Promise.reject(error);
   },
 
 }
 
 /**
--- a/toolkit/components/telemetry/Histograms.json
+++ b/toolkit/components/telemetry/Histograms.json
@@ -2289,16 +2289,40 @@
   },
   "PREDICTOR_STARTUP_COUNT_OVERFLOWS": {
       "expires_in_version": "never",
       "kind": "linear",
       "high": "100",
       "n_buckets": 50,
       "description": "Number of times startup count overflowed"
   },
+  "HTTPCONNMGR_TOTAL_SPECULATIVE_CONN": {
+    "expires_in_version": "never",
+    "kind": "exponential",
+    "high": "1000 * 1000",
+    "n_buckets": 50,
+    "extended_statistics_ok": true,
+    "description": "How many speculative http connections are created"
+  },
+  "HTTPCONNMGR_USED_SPECULATIVE_CONN": {
+    "expires_in_version": "never",
+    "kind": "exponential",
+    "high": "1000 * 1000",
+    "n_buckets": 50,
+    "extended_statistics_ok": true,
+    "description": "How many speculative http connections are actually used"
+  },
+  "HTTPCONNMGR_UNUSED_SPECULATIVE_CONN": {
+    "expires_in_version": "never",
+    "kind": "exponential",
+    "high": "1000 * 1000",
+    "n_buckets": 50,
+    "extended_statistics_ok": true,
+    "description": "How many speculative connections are made needlessly"
+  },
   "FIND_PLUGINS": {
     "expires_in_version": "never",
     "kind": "exponential",
     "high": "3000",
     "n_buckets": 10,
     "extended_statistics_ok": true,
     "description": "Time spent scanning filesystem for plugins (ms)"
   },
--- a/widget/windows/nsWindow.cpp
+++ b/widget/windows/nsWindow.cpp
@@ -5723,19 +5723,19 @@ nsWindow::GetMessageTimeStamp(LONG aEven
     cyclesToAdd--;
   // Likewise, if our rough calculation says we've just wrapped but actually the
   // event time is just after the wrap point, we need to add an extra wrap.
   } else if (intervalFraction > 0.9 &&
              timeSinceFirstEvent < kEventTimeRange * 0.1) {
     cyclesToAdd++;
   }
 
-  if (timesWrapped > 0) {
+  if (cyclesToAdd > 0) {
     eventTimeStamp +=
-      TimeDuration::FromMilliseconds(kEventTimeRange * timesWrapped);
+      TimeDuration::FromMilliseconds(kEventTimeRange * cyclesToAdd);
   }
 
   return eventTimeStamp;
 }
 
 void
 nsWindow::UpdateFirstEventTime(DWORD aEventTime)
 {