Bug 1319531 - Ensure all streams provided by DatabaseFile can be read without generating NS_BASE_STREAM_WOULD_BLOCK errors. r=janv a=gchang
authorAndrew Sutherland <asutherland@asutherland.org>
Mon, 30 Jan 2017 01:07:27 -0500
changeset 375841 fb3814280e1a5b280c3498ebc3b5590bebdfc0c7
parent 375840 d06f2bf30b22a04b03e2046d5c148adedd6777c1
child 375842 28160f4776bd67cd0836aa09468052fa1f8f3d28
push id6996
push userjlorenzo@mozilla.com
push dateMon, 06 Mar 2017 20:48:21 +0000
treeherdermozilla-beta@d89512dab048 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjanv, gchang
bugs1319531
milestone53.0a2
Bug 1319531 - Ensure all streams provided by DatabaseFile can be read without generating NS_BASE_STREAM_WOULD_BLOCK errors. r=janv a=gchang IndexedDB database operations are written such that they must execute synchronously. For this reason, the add/put operation reads/writes its Blobs to disk in a synchronous fashion. However, with the introduction of SendStream-backed Blobs for large (>1 MiB) blobs whose contents are streamed to the parent replacing nsStringInputStream-backed Blobs (whose contents were sent up in a single IPC message that might exceed the size limit and cause a crash), this has no longer been a safe assumption. However, the problems weren't immediately obvious because most pre-WASM Blobs are smaller than the 1MiB threshold and even when they exceeded the size, their memory-backed contents could rapidly be sent to the parent via IPC, making NS_BASE_STREAM_WOULD_BLOCK errors rare/hard to reproduce. (rr and its enforced single-threading is a good way to reproduce, however. Also, see the testing patch on the bug that introduces artificial delays into SendStream.) Included SpecialPowersObserver.jsm minor changes to "CreateFiles": - appendRelativePath is used instead of appendPath because appendPath only allows a single path component to be appended. In other words, file creation is limited to files at the root of the profile directory using appendPath, which is needlessly limiting. - outStream is now closed even if no data is provided. This is essential on windows where file deletion only occurs when all handles are closed. Without this fix, "RemoveFiles" side-effect of deleting the created files might not take effect until garbage collection runs and collects the outStream.
dom/indexedDB/ActorsParent.cpp
dom/indexedDB/test/helpers.js
dom/indexedDB/test/mochitest.ini
dom/indexedDB/test/test_file_put_deleted.html
testing/specialpowers/content/SpecialPowersObserver.jsm
--- a/dom/indexedDB/ActorsParent.cpp
+++ b/dom/indexedDB/ActorsParent.cpp
@@ -74,16 +74,17 @@
 #include "nsIEventTarget.h"
 #include "nsIFile.h"
 #include "nsIFileURL.h"
 #include "nsIFileProtocolHandler.h"
 #include "nsIInputStream.h"
 #include "nsIInterfaceRequestor.h"
 #include "nsInterfaceHashtable.h"
 #include "nsIOutputStream.h"
+#include "nsIPipe.h"
 #include "nsIPrincipal.h"
 #include "nsIScriptSecurityManager.h"
 #include "nsISupports.h"
 #include "nsISupportsImpl.h"
 #include "nsISupportsPriority.h"
 #include "nsIThread.h"
 #include "nsITimer.h"
 #include "nsIURI.h"
@@ -6630,16 +6631,46 @@ private:
 
   bool
   SendFailureResult(nsresult aResultCode) override;
 
   void
   Cleanup() override;
 };
 
+/**
+ * In coordination with IDBDatabase's mFileActors weak-map on the child side, a
+ * long-lived mapping from a child process's live Blobs to their corresponding
+ * FileInfo in our owning database.  Assists in avoiding redundant IPC traffic
+ * and disk storage.  This includes both:
+ * - Blobs retrieved from this database and sent to the child that do not need
+ *   to be written to disk because they already exist on disk in this database's
+ *   files directory.
+ * - Blobs retrieved from other databases (that are therefore !IsShareable())
+ *   or from anywhere else that will need to be written to this database's files
+ *   directory.  In this case we will hold a reference to its BlobImpl in
+ *   mBlobImpl until we have successfully written the Blob to disk.
+ *
+ * Relevant Blob context: Blobs sent from the parent process to child processes
+ * are automatically linked back to their source BlobImpl when the child process
+ * references the Blob via IPC.  (This is true even when a new "KnownBlob" actor
+ * must be created because the reference is occurring on a different thread than
+ * the PBlob actor created when the blob was sent to the child.)  However, when
+ * getting an actor in the child process for sending an in-child-created Blob to
+ * the parent process, there is (currently) no Blob machinery to automatically
+ * establish and reuse a long-lived Actor.  As a result, without IDB's weak-map
+ * cleverness, a memory-backed Blob repeatedly sent from the child to the parent
+ * would appear as a different Blob each time, requiring the Blob data to be
+ * sent over IPC each time as well as potentially needing to be written to disk
+ * each time.
+ *
+ * This object remains alive as long as there is an active child actor or an
+ * ObjectStoreAddOrPutRequestOp::StoredFileInfo for a queued or active add/put
+ * op is holding a reference to us.
+ */
 class DatabaseFile final
   : public PBackgroundIDBDatabaseFileParent
 {
   friend class Database;
 
   RefPtr<BlobImpl> mBlobImpl;
   RefPtr<FileInfo> mFileInfo;
 
@@ -6649,30 +6680,64 @@ public:
   FileInfo*
   GetFileInfo() const
   {
     AssertIsOnBackgroundThread();
 
     return mFileInfo;
   }
 
+  /**
+   * Is there a blob available to provide an input stream?  This may be called
+   * on any thread under current lifecycle constraints.
+   */
+  bool
+  HasBlobImpl() const
+  {
+    return (bool)mBlobImpl;
+  }
+
+  /**
+   * If mBlobImpl is non-null (implying the contents of this file have not yet
+   * been written to disk), then return an input stream that is guaranteed to
+   * block on reads and never return NS_BASE_STREAM_WOULD_BLOCK.  If mBlobImpl
+   * is null (because the contents have been written to disk), returns null.
+   *
+   * Because this method does I/O, it should only be called on a database I/O
+   * thread, not on PBackground.  Note that we actually open the stream on this
+   * thread, where previously it was opened on PBackground.  This is safe and
+   * equally efficient because blob implementations are thread-safe and blobs in
+   * the parent are fully populated.  (This would not be efficient in the child
+   * where the open request would have to be dispatched back to the PBackground
+   * thread because of the need to potentially interact with actors.)
+   *
+   * We enforce this guarantee by wrapping the stream in a blocking pipe unless
+   * either is true:
+   * - The stream is already a blocking stream.  (AKA it's not non-blocking.)
+   *   This is the case for nsFileStreamBase-derived implementations and
+   *   appropriately configured nsPipes (but not PSendStream-generated pipes).
+   * - The stream already contains the entire contents of the Blob.  For
+   *   example, nsStringInputStreams report as non-blocking, but also have their
+   *   entire contents synchronously available, so they will never return
+   *   NS_BASE_STREAM_WOULD_BLOCK.  There is no need to wrap these in a pipe.
+   *   (It's also very common for SendStream-based Blobs to have their contents
+   *   entirely streamed into the parent process by the time this call is
+   *   issued.)
+   *
+   * This additional logic is necessary because our database operations all
+   * are written in such a way that the operation is assumed to have completed
+   * when they yield control-flow, and:
+   * - When memory-backed blobs cross a certain threshold (1MiB at the time of
+   *   writing), they will be sent up from the child via PSendStream in chunks
+   *   to a non-blocking pipe that will return NS_BASE_STREAM_WOULD_BLOCK.
+   * - Other Blob types could potentially be non-blocking.  (We're not making
+   *   any assumptions.)
+   */
   already_AddRefed<nsIInputStream>
-  GetInputStream() const
-  {
-    AssertIsOnBackgroundThread();
-
-    nsCOMPtr<nsIInputStream> inputStream;
-    if (mBlobImpl) {
-      ErrorResult rv;
-      mBlobImpl->GetInternalStream(getter_AddRefs(inputStream), rv);
-      MOZ_ALWAYS_TRUE(!rv.Failed());
-    }
-
-    return inputStream.forget();
-  }
+  GetBlockingInputStream(ErrorResult &rv) const;
 
   void
   ClearInputStream()
   {
     AssertIsOnBackgroundThread();
 
     mBlobImpl = nullptr;
   }
@@ -6697,21 +6762,93 @@ private:
   }
 
   ~DatabaseFile() override = default;
 
   void
   ActorDestroy(ActorDestroyReason aWhy) override
   {
     AssertIsOnBackgroundThread();
-
-    mBlobImpl = nullptr;
-    mFileInfo = nullptr;
-  }
-};
+  }
+};
+
+already_AddRefed<nsIInputStream>
+DatabaseFile::GetBlockingInputStream(ErrorResult &rv) const
+{
+  // We should only be called from a database I/O thread, not the background
+  // thread.  The background thread should call HasBlobImpl().
+  MOZ_ASSERT(!IsOnBackgroundThread());
+
+  if (!mBlobImpl) {
+    return nullptr;
+  }
+
+  nsCOMPtr<nsIInputStream> inputStream;
+  mBlobImpl->GetInternalStream(getter_AddRefs(inputStream), rv);
+  if (rv.Failed()) {
+    return nullptr;
+  }
+
+  // If it's non-blocking we may need a pipe.
+  bool pipeNeeded;
+  rv = inputStream->IsNonBlocking(&pipeNeeded);
+  if (rv.Failed()) {
+    return nullptr;
+  }
+
+  // We don't need a pipe if all the bytes might already be available.
+  if (pipeNeeded) {
+    uint64_t available;
+    rv = inputStream->Available(&available);
+    if (rv.Failed()) {
+      return nullptr;
+    }
+
+    uint64_t blobSize = mBlobImpl->GetSize(rv);
+    if (rv.Failed()) {
+      return nullptr;
+    }
+
+    if (available == blobSize) {
+      pipeNeeded = false;
+    }
+  }
+
+  if (pipeNeeded) {
+    nsCOMPtr<nsIEventTarget> target =
+      do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID);
+    if (!target) {
+      rv.Throw(NS_ERROR_UNEXPECTED);
+      return nullptr;
+    }
+
+    nsCOMPtr<nsIInputStream> pipeInputStream;
+    nsCOMPtr<nsIOutputStream> pipeOutputStream;
+
+    rv = NS_NewPipe(
+      getter_AddRefs(pipeInputStream),
+      getter_AddRefs(pipeOutputStream),
+      0, 0, // default buffering is fine
+      false, // we absolutely want a blocking input stream
+      true); // we don't need the writer to block
+    if (rv.Failed()) {
+      return nullptr;
+    }
+
+    rv = NS_AsyncCopy(inputStream, pipeOutputStream, target);
+    if (rv.Failed()) {
+      return nullptr;
+    }
+
+    inputStream = pipeInputStream;
+  }
+
+  return inputStream.forget();
+}
+
 
 class TransactionBase
 {
   friend class Cursor;
 
   class CommitOp;
 
 protected:
@@ -8215,16 +8352,18 @@ private:
   void
   Cleanup() override;
 };
 
 struct ObjectStoreAddOrPutRequestOp::StoredFileInfo final
 {
   RefPtr<DatabaseFile> mFileActor;
   RefPtr<FileInfo> mFileInfo;
+  // A non-Blob-backed input stream to write to disk.  If null, mFileActor may
+  // still have a stream for us to write.
   nsCOMPtr<nsIInputStream> mInputStream;
   StructuredCloneFile::FileType mType;
   bool mCopiedSuccessfully;
 
   StoredFileInfo()
     : mType(StructuredCloneFile::eBlob)
     , mCopiedSuccessfully(false)
   {
@@ -8446,17 +8585,17 @@ protected:
 
 protected:
   IndexRequestOpBase(TransactionBase* aTransaction,
                      const RequestParams& aParams)
     : NormalTransactionOp(aTransaction)
     , mMetadata(IndexMetadataForParams(aTransaction, aParams))
   { }
 
-  
+
   ~IndexRequestOpBase() override = default;
 
 private:
   static already_AddRefed<FullIndexMetadata>
   IndexMetadataForParams(TransactionBase* aTransaction,
                          const RequestParams& aParams);
 };
 
@@ -8655,17 +8794,17 @@ protected:
 #ifdef DEBUG
     , mResponseSent(false)
 #endif
   {
     AssertIsOnBackgroundThread();
     MOZ_ASSERT(aCursor);
   }
 
-  
+
   ~CursorOpBase() override = default;
 
   bool
   SendFailureResult(nsresult aResultCode) override;
 
   void
   Cleanup() override;
 
@@ -26041,19 +26180,17 @@ ObjectStoreAddOrPutRequestOp::Init(Trans
           storedFileInfo->mFileActor =
             static_cast<DatabaseFile*>(
               file.get_PBackgroundIDBDatabaseFileParent());
           MOZ_ASSERT(storedFileInfo->mFileActor);
 
           storedFileInfo->mFileInfo = storedFileInfo->mFileActor->GetFileInfo();
           MOZ_ASSERT(storedFileInfo->mFileInfo);
 
-          storedFileInfo->mInputStream =
-            storedFileInfo->mFileActor->GetInputStream();
-          if (storedFileInfo->mInputStream && !mFileManager) {
+          if (storedFileInfo->mFileActor->HasBlobImpl() && !mFileManager) {
             mFileManager = fileManager;
           }
 
           storedFileInfo->mType = StructuredCloneFile::eBlob;
           break;
         }
 
         case StructuredCloneFile::eMutableFile: {
@@ -26080,19 +26217,17 @@ ObjectStoreAddOrPutRequestOp::Init(Trans
           storedFileInfo->mFileActor =
             static_cast<DatabaseFile*>(
               file.get_PBackgroundIDBDatabaseFileParent());
           MOZ_ASSERT(storedFileInfo->mFileActor);
 
           storedFileInfo->mFileInfo = storedFileInfo->mFileActor->GetFileInfo();
           MOZ_ASSERT(storedFileInfo->mFileInfo);
 
-          storedFileInfo->mInputStream =
-            storedFileInfo->mFileActor->GetInputStream();
-          if (storedFileInfo->mInputStream && !mFileManager) {
+          if (storedFileInfo->mFileActor->HasBlobImpl() && !mFileManager) {
             mFileManager = fileManager;
           }
 
           storedFileInfo->mType = fileAddInfo.type();
           break;
         }
 
         default:
@@ -26320,18 +26455,39 @@ ObjectStoreAddOrPutRequestOp::DoDatabase
     nsAutoString fileIds;
 
     for (uint32_t count = mStoredFileInfos.Length(), index = 0;
          index < count;
          index++) {
       StoredFileInfo& storedFileInfo = mStoredFileInfos[index];
       MOZ_ASSERT(storedFileInfo.mFileInfo);
 
+      // If there is a StoredFileInfo, then one of the following is true:
+      // - This was an overflow structured clone and storedFileInfo.mInputStream
+      //   MUST be non-null.
+      // - This is a reference to a Blob that may or may not have already been
+      //   written to disk.  storedFileInfo.mFileActor MUST be non-null, but
+      //   its HasBlobImpl() may return false and so GetBlockingInputStream may
+      //   return null (so don't assert on them).
+      // - It's a mutable file.  No writing will be performed.
+      MOZ_ASSERT(storedFileInfo.mInputStream || storedFileInfo.mFileActor ||
+                 storedFileInfo.mType == StructuredCloneFile::eMutableFile);
+
       nsCOMPtr<nsIInputStream> inputStream;
+      // Check for an explicit stream, like a structured clone stream.
       storedFileInfo.mInputStream.swap(inputStream);
+      // Check for a blob-backed stream otherwise.
+      if (!inputStream && storedFileInfo.mFileActor) {
+        ErrorResult streamRv;
+        inputStream =
+          storedFileInfo.mFileActor->GetBlockingInputStream(streamRv);
+        if (NS_WARN_IF(streamRv.Failed())) {
+          return streamRv.StealNSResult();
+        }
+      }
 
       if (inputStream) {
         RefPtr<FileInfo>& fileInfo = storedFileInfo.mFileInfo;
 
         nsCOMPtr<nsIFile> file = fileHelper->GetFile(fileInfo);
         if (NS_WARN_IF(!file)) {
           IDB_REPORT_INTERNAL_ERR();
           return NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR;
--- a/dom/indexedDB/test/helpers.js
+++ b/dom/indexedDB/test/helpers.js
@@ -253,16 +253,23 @@ function continueToNextStepSync()
 }
 
 function errorHandler(event)
 {
   ok(false, "indexedDB error, '" + event.target.error.name + "'");
   finishTest();
 }
 
+// For error callbacks where the argument is not an event object.
+function errorCallbackHandler(err)
+{
+  ok(false, "got unexpected error callback: " + err);
+  finishTest();
+}
+
 function expectUncaughtException(expecting)
 {
   SimpleTest.expectUncaughtException(expecting);
 }
 
 function browserErrorHandler(event)
 {
   browserFinishTest();
--- a/dom/indexedDB/test/mochitest.ini
+++ b/dom/indexedDB/test/mochitest.ini
@@ -151,16 +151,17 @@ support-files =
 [test_error_events_abort_transactions.html]
 [test_event_propagation.html]
 [test_event_source.html]
 [test_exceptions_in_events.html]
 [test_file_array.html]
 [test_file_cross_database_copying.html]
 [test_file_delete.html]
 [test_file_os_delete.html]
+[test_file_put_deleted.html]
 [test_file_put_get_object.html]
 [test_file_put_get_values.html]
 [test_file_replace.html]
 [test_file_resurrection_delete.html]
 [test_file_resurrection_transaction_abort.html]
 [test_file_sharing.html]
 [test_file_transaction_abort.html]
 [test_filehandle_append_read_data.html]
new file mode 100644
--- /dev/null
+++ b/dom/indexedDB/test/test_file_put_deleted.html
@@ -0,0 +1,155 @@
+<!--
+  Any copyright is dedicated to the Public Domain.
+  http://creativecommons.org/publicdomain/zero/1.0/
+-->
+<html>
+<head>
+  <title>Indexed Database Property Test</title>
+
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+
+  <script type="text/javascript;version=1.7">
+  /**
+   * Test that a put of a file-backed Blob/File whose backing file has been
+   * deleted results in a failure of that put operation.
+   *
+   * In order to create a file-backed Blob and ensure that we actually try and
+   * copy its contents (rather than triggering a reference-count increment), we
+   * use two separate databases.  This test is derived from
+   * test_file_cross_database_copying.html.
+   */
+  function* testSteps()
+  {
+    const READ_WRITE = "readwrite";
+
+    const databaseInfo = [
+      { name: window.location.pathname + "1", source: true },
+      { name: window.location.pathname + "2", source: false }
+    ];
+
+    const objectStoreName = "Blobs";
+
+    const fileData = { key: 1, file: getRandomFile("random.bin", 10000) };
+
+    SpecialPowers.pushPrefEnv({ set: [["dom.indexedDB.dataThreshold", -1]] },
+                              continueToNextStep);
+    yield undefined;
+
+    // Open both databases, put the File in the source.
+    let databases = [];
+    for (let info of databaseInfo) {
+      let request = indexedDB.open(info.name, 1);
+      request.onerror = errorHandler;
+      request.onupgradeneeded = grabEventAndContinueHandler;
+      request.onsuccess = grabEventAndContinueHandler;
+      let event = yield undefined;
+
+      is(event.type, "upgradeneeded", "Got correct event type");
+
+      let db = event.target.result;
+      // We don't expect any errors yet for either database, but will later on.
+      db.onerror = errorHandler;
+
+      let objectStore = db.createObjectStore(objectStoreName, { });
+      if (info.source) {
+        objectStore.add(fileData.file, fileData.key);
+      }
+
+      event = yield undefined;
+
+      is(event.type, "success", "Got correct event type");
+
+      databases.push(db);
+    }
+
+    // Get a reference to the file-backed File.
+    let fileBackedFile;
+    for (let db of databases.slice(0, 1)) {
+      let request = db.transaction([objectStoreName])
+                      .objectStore(objectStoreName).get(fileData.key);
+      request.onsuccess = grabEventAndContinueHandler;
+      event = yield undefined;
+
+      let result = event.target.result;
+      verifyBlob(result, fileData.file, 1);
+      yield undefined;
+
+      fileBackedFile = result;
+    }
+
+    // Delete the backing file...
+    let fileFullPath = getFilePath(fileBackedFile);
+    // (We want to chop off the profile root and the resulting path component
+    // must not start with a directory separator.)
+    let fileRelPath =
+      fileFullPath.substring(fileFullPath.search(/[/\\]storage[/\\]default[/\\]/) + 1);
+    info("trying to delete: " + fileRelPath);
+    // by using the existing SpecialPowers mechanism to create files and clean
+    // them up.  We clobber our existing content, then trigger deletion to
+    // clean up after it.
+    SpecialPowers.createFiles(
+      [{ name: fileRelPath, data: '' }],
+      grabEventAndContinueHandler, errorCallbackHandler);
+    yield undefined;
+    // This is async without a callback because it's intended for cleanup.
+    // Since IDB is PBackground, we can't depend on serial ordering, so we need
+    // to use another async action.
+    SpecialPowers.removeFiles();
+    SpecialPowers.executeAfterFlushingMessageQueue(grabEventAndContinueHandler);
+    yield undefined;
+    // The file is now deleted!
+
+    // Try and put the file-backed Blob in the database, expect failure on the
+    // request and transaction.
+    info("attempt to store deleted file-backed blob"); // context for NS_WARN_IF
+    for (let i = 1; i < databases.length; i++) {
+      let db = databases[i];
+
+      let trans = db.transaction([objectStoreName], READ_WRITE);
+      let objectStore = trans.objectStore(objectStoreName);
+
+      request = objectStore.add(fileBackedFile, 2);
+      request.onsuccess = unexpectedSuccessHandler;
+      request.onerror = expectedErrorHandler("UnknownError");
+      trans.onsuccess = unexpectedSuccessHandler;
+      trans.onerror = expectedErrorHandler("UnknownError");
+      // the database will also throw an error.
+      db.onerror = expectedErrorHandler("UnknownError");
+      event = yield undefined;
+      event = yield undefined;
+      event = yield undefined;
+      // the database shouldn't throw any more errors now.
+      db.onerror = errorHandler;
+    }
+
+    // Ensure there's nothing with that key in the target database.
+    info("now that the transaction failed, make sure our put got rolled back");
+    for (let i = 1; i < databases.length; i++) {
+      let db = databases[i];
+
+      let objectStore = db.transaction([objectStoreName], "readonly")
+                          .objectStore(objectStoreName);
+
+      // Attempt to fetch the key to verify there's nothing in the DB rather
+      // than the value which could return undefined as a misleading error.
+      request = objectStore.getKey(2);
+      request.onsuccess = grabEventAndContinueHandler;
+      request.onerror = errorHandler;
+      event = yield undefined;
+
+      let result = event.target.result;
+      is(result, undefined, "no key found"); // (the get returns undefined)
+    }
+
+    finishTest();
+  }
+  </script>
+  <script type="text/javascript;version=1.7" src="file.js"></script>
+  <script type="text/javascript;version=1.7" src="helpers.js"></script>
+
+</head>
+
+<body onload="runTest();"></body>
+
+</html>
--- a/testing/specialpowers/content/SpecialPowersObserver.jsm
+++ b/testing/specialpowers/content/SpecialPowersObserver.jsm
@@ -252,27 +252,27 @@ SpecialPowersObserver.prototype.receiveM
         this._createdFiles = new Array;
       }
       let createdFiles = this._createdFiles;
       try {
         aMessage.data.forEach(function(request) {
           const filePerms = 0666;
           let testFile = Services.dirsvc.get("ProfD", Ci.nsIFile);
           if (request.name) {
-            testFile.append(request.name);
+            testFile.appendRelativePath(request.name);
           } else {
             testFile.createUnique(Ci.nsIFile.NORMAL_FILE_TYPE, filePerms);
           }
           let outStream = Cc["@mozilla.org/network/file-output-stream;1"].createInstance(Ci.nsIFileOutputStream);
           outStream.init(testFile, 0x02 | 0x08 | 0x20, // PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE
                          filePerms, 0);
           if (request.data) {
             outStream.write(request.data, request.data.length);
-            outStream.close();
           }
+          outStream.close();
           filePaths.push(File.createFromFileName(testFile.path, request.options));
           createdFiles.push(testFile);
         });
         aMessage.target
                 .QueryInterface(Ci.nsIFrameLoaderOwner)
                 .frameLoader
                 .messageManager
                 .sendAsyncMessage("SpecialPowers.FilesCreated", filePaths);