Backed out 4 changesets (bug 1311935) for causing assertion crash by developer's request
author Iris Hsiao <ihsiao@mozilla.com>
Tue, 11 Apr 2017 11:04:54 +0800
changeset 560260 992b8a6ae635c5e8f0aa0ab03e161ef243add7b4
parent 560259 a25e24d077b8e16c441f0f622342452108fd79db
child 560261 fafb04b95d0fd05429fd991e23f059aa32065dcf
push id 53365
push user jichen@mozilla.com
push date Tue, 11 Apr 2017 08:35:12 +0000
bugs 1311935
milestone 55.0a1
backs out 27e624cd94796146934ca29e98f8934b61243809
4c0381ab099046d316200e0779616e717606da0d
73587838ef1620aef0ce63abad7b8c824f5eacf6
a5a6c0f79733688fc11c3a03a0d2ea9b2ae1d8aa
Backed out 4 changesets (bug 1311935) for causing assertion crash by developer's request

Backed out changeset 27e624cd9479 (bug 1311935)
Backed out changeset 4c0381ab0990 (bug 1311935)
Backed out changeset 73587838ef16 (bug 1311935)
Backed out changeset a5a6c0f79733 (bug 1311935)
toolkit/components/url-classifier/Classifier.cpp
toolkit/components/url-classifier/Classifier.h
toolkit/components/url-classifier/Entries.h
toolkit/components/url-classifier/HashStore.cpp
toolkit/components/url-classifier/HashStore.h
toolkit/components/url-classifier/LookupCache.cpp
toolkit/components/url-classifier/LookupCache.h
toolkit/components/url-classifier/LookupCacheV4.cpp
toolkit/components/url-classifier/LookupCacheV4.h
toolkit/components/url-classifier/VariableLengthPrefixSet.cpp
toolkit/components/url-classifier/nsIUrlClassifierHashCompleter.idl
toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
toolkit/components/url-classifier/nsUrlClassifierDBService.h
toolkit/components/url-classifier/nsUrlClassifierHashCompleter.js
toolkit/components/url-classifier/nsUrlClassifierUtils.cpp
toolkit/components/url-classifier/tests/gtest/Common.cpp
toolkit/components/url-classifier/tests/gtest/Common.h
toolkit/components/url-classifier/tests/gtest/TestCachingV4.cpp
toolkit/components/url-classifier/tests/gtest/TestFindFullHash.cpp
toolkit/components/url-classifier/tests/gtest/TestLookupCacheV4.cpp
toolkit/components/url-classifier/tests/gtest/moz.build
toolkit/components/url-classifier/tests/unit/test_hashcompleter.js
toolkit/components/url-classifier/tests/unit/test_hashcompleter_v4.js
toolkit/components/url-classifier/tests/unit/test_partial.js
--- a/toolkit/components/url-classifier/Classifier.cpp
+++ b/toolkit/components/url-classifier/Classifier.cpp
@@ -388,32 +388,25 @@ Classifier::TableRequest(nsACString& aRe
 
   // Generating v2 table info.
   nsTArray<nsCString> tables;
   ActiveTables(tables);
   for (uint32_t i = 0; i < tables.Length(); i++) {
     HashStore store(tables[i], GetProvider(tables[i]), mRootStoreDirectory);
 
     nsresult rv = store.Open();
-    if (NS_FAILED(rv)) {
+    if (NS_FAILED(rv))
       continue;
-    }
+
+    aResult.Append(store.TableName());
+    aResult.Append(';');
 
     ChunkSet &adds = store.AddChunks();
     ChunkSet &subs = store.SubChunks();
 
-    // Open HashStore will always succeed even that is not a v2 table.
-    // So skip tables without add and sub chunks.
-    if (adds.Length() == 0 && subs.Length() == 0) {
-      continue;
-    }
-
-    aResult.Append(store.TableName());
-    aResult.Append(';');
-
     if (adds.Length() > 0) {
       aResult.AppendLiteral("a:");
       nsAutoCString addList;
       adds.Serialize(addList);
       aResult.Append(addList);
     }
 
     if (subs.Length() > 0) {
@@ -491,28 +484,35 @@ Classifier::Check(const nsACString& aSpe
       nsAutoCString checking;
       lookupHash.ToHexString(checking);
       LOG(("Checking fragment %s, hash %s (%X)", fragments[i].get(),
            checking.get(), lookupHash.ToUint32()));
     }
 
     for (uint32_t i = 0; i < cacheArray.Length(); i++) {
       LookupCache *cache = cacheArray[i];
-      bool has, fromCache, confirmed;
+      bool has, fromCache;
       uint32_t matchLength;
 
-      rv = cache->Has(lookupHash, mTableFreshness, aFreshnessGuarantee,
-                      &has, &matchLength, &confirmed, &fromCache);
+      rv = cache->Has(lookupHash, &has, &matchLength, &fromCache);
       NS_ENSURE_SUCCESS(rv, rv);
-
       if (has) {
         LookupResult *result = aResults.AppendElement();
         if (!result)
           return NS_ERROR_OUT_OF_MEMORY;
 
+        // For V2, there is no TTL for caching, so we use table freshness to
+        // decide if matching a completion should trigger a gethash request or not.
+        // For V4, this is done by Positive Caching & Negative Caching mechanism.
+        bool confirmed = false;
+        if (fromCache) {
+          cache->IsHashEntryConfirmed(lookupHash, mTableFreshness,
+                                      aFreshnessGuarantee, &confirmed);
+        }
+
         LOG(("Found a result in %s: %s",
              cache->TableName().get(),
              confirmed ? "confirmed." : "Not confirmed."));
 
         result->hash.complete = lookupHash;
         result->mConfirmed = confirmed;
         result->mTableName.Assign(cache->TableName());
         result->mPartialHashLength = confirmed ? COMPLETE_SIZE : matchLength;
@@ -917,87 +917,67 @@ Classifier::DropStores()
 }
 
 nsresult
 Classifier::RegenActiveTables()
 {
   mActiveTablesCache.Clear();
 
   nsTArray<nsCString> foundTables;
-  ScanStoreDir(mRootStoreDirectory, foundTables);
+  ScanStoreDir(foundTables);
 
   for (uint32_t i = 0; i < foundTables.Length(); i++) {
     nsCString table(foundTables[i]);
+    HashStore store(table, GetProvider(table), mRootStoreDirectory);
 
-    LookupCache *lookupCache = GetLookupCache(table);
+    nsresult rv = store.Open();
+    if (NS_FAILED(rv))
+      continue;
+
+    LookupCache *lookupCache = GetLookupCache(store.TableName());
     if (!lookupCache) {
       continue;
     }
 
-    if (!lookupCache->IsPrimed()) {
+    if (!lookupCache->IsPrimed())
       continue;
-    }
 
-    if (LookupCache::Cast<LookupCacheV4>(lookupCache)) {
-      LOG(("Active v4 table: %s", table.get()));
-    } else {
-      HashStore store(table, GetProvider(table), mRootStoreDirectory);
+    const ChunkSet &adds = store.AddChunks();
+    const ChunkSet &subs = store.SubChunks();
 
-      nsresult rv = store.Open();
-      if (NS_FAILED(rv)) {
-        continue;
-      }
-
-      const ChunkSet &adds = store.AddChunks();
-      const ChunkSet &subs = store.SubChunks();
+    if (adds.Length() == 0 && subs.Length() == 0)
+      continue;
 
-      if (adds.Length() == 0 && subs.Length() == 0) {
-        continue;
-      }
-
-      LOG(("Active v2 table: %s", store.TableName().get()));
-    }
-
-    mActiveTablesCache.AppendElement(table);
+    LOG(("Active table: %s", store.TableName().get()));
+    mActiveTablesCache.AppendElement(store.TableName());
   }
 
   return NS_OK;
 }
 
 nsresult
-Classifier::ScanStoreDir(nsIFile* aDirectory, nsTArray<nsCString>& aTables)
+Classifier::ScanStoreDir(nsTArray<nsCString>& aTables)
 {
   nsCOMPtr<nsISimpleEnumerator> entries;
-  nsresult rv = aDirectory->GetDirectoryEntries(getter_AddRefs(entries));
+  nsresult rv = mRootStoreDirectory->GetDirectoryEntries(getter_AddRefs(entries));
   NS_ENSURE_SUCCESS(rv, rv);
 
   bool hasMore;
   while (NS_SUCCEEDED(rv = entries->HasMoreElements(&hasMore)) && hasMore) {
     nsCOMPtr<nsISupports> supports;
     rv = entries->GetNext(getter_AddRefs(supports));
     NS_ENSURE_SUCCESS(rv, rv);
 
     nsCOMPtr<nsIFile> file = do_QueryInterface(supports);
 
-    // If |file| is a directory, recurse to find its entries as well.
-    bool isDirectory;
-    if (NS_FAILED(file->IsDirectory(&isDirectory))) {
-      continue;
-    }
-    if (isDirectory) {
-      ScanStoreDir(file, aTables);
-      continue;
-    }
-
     nsCString leafName;
     rv = file->GetNativeLeafName(leafName);
     NS_ENSURE_SUCCESS(rv, rv);
 
-    // Both v2 and v4 contain .pset file
-    nsCString suffix(NS_LITERAL_CSTRING(".pset"));
+    nsCString suffix(NS_LITERAL_CSTRING(".sbstore"));
 
     int32_t dot = leafName.RFind(suffix, 0);
     if (dot != -1) {
       leafName.Cut(dot, suffix.Length());
       aTables.AppendElement(leafName);
     }
   }
   NS_ENSURE_SUCCESS(rv, rv);
@@ -1321,21 +1301,16 @@ Classifier::UpdateTableV4(nsTArray<Table
   }
 
   LookupCacheV4* lookupCache =
     LookupCache::Cast<LookupCacheV4>(GetLookupCacheForUpdate(aTable));
   if (!lookupCache) {
     return NS_ERROR_UC_UPDATE_TABLE_NOT_FOUND;
   }
 
-  // Remove cache entries whose negative cache time is expired when update.
-  // We don't check if positive cache time is expired here because we want to
-  // keep the eviction rule simple when doing an update.
-  lookupCache->InvalidateExpiredCacheEntry();
-
   nsresult rv = NS_OK;
 
   // If there are multiple updates for the same table, prefixes1 & prefixes2
   // will act as input and output in turn to reduce memory copy overhead.
   PrefixStringMap prefixes1, prefixes2;
   PrefixStringMap* input = &prefixes1;
   PrefixStringMap* output = &prefixes2;
 
@@ -1416,29 +1391,18 @@ Classifier::UpdateCache(TableUpdate* aUp
   nsAutoCString table(aUpdate->TableName());
   LOG(("Classifier::UpdateCache(%s)", table.get()));
 
   LookupCache *lookupCache = GetLookupCache(table);
   if (!lookupCache) {
     return NS_ERROR_FAILURE;
   }
 
-  auto lookupV2 = LookupCache::Cast<LookupCacheV2>(lookupCache);
-  if (lookupV2) {
-    auto updateV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
-    lookupV2->AddCompletionsToCache(updateV2->AddCompletes());
-  } else {
-    auto lookupV4 = LookupCache::Cast<LookupCacheV4>(lookupCache);
-    if (!lookupV4) {
-      return NS_ERROR_FAILURE;
-    }
-
-    auto updateV4 = TableUpdate::Cast<TableUpdateV4>(aUpdate);
-    lookupV4->AddFullHashResponseToCache(updateV4->FullHashResponse());
-  }
+  auto updateV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
+  lookupCache->AddCompletionsToCache(updateV2->AddCompletes());
 
 #if defined(DEBUG)
   lookupCache->DumpCache();
 #endif
 
   return NS_OK;
 }
 
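Editorial note (not part of the changeset): the Classifier.cpp hunks above restore the pre-landing flow in which Classifier::Check() asks the per-table cache via LookupCache::Has() and, for matches that came from the gethash cache, derives confirmation from table freshness through IsHashEntryConfirmed(). The following is a minimal standalone sketch of that freshness rule, using standard C++ in place of Mozilla's TableFreshnessMap and PR_Now(); the function and type names here are illustrative only.

#include <chrono>
#include <cstdint>
#include <map>
#include <string>

// Hypothetical stand-in for TableFreshnessMap: table name -> last update time
// in seconds since the epoch.
using FreshnessMap = std::map<std::string, int64_t>;

// A cached V2 completion counts as "confirmed" (no gethash round trip needed)
// only if its table was updated within the freshness guarantee window.
bool IsCompletionConfirmed(const FreshnessMap& aFreshness,
                           const std::string& aTable,
                           int64_t aFreshnessGuaranteeSec)
{
  auto it = aFreshness.find(aTable);
  if (it == aFreshness.end()) {
    return false;  // Table never updated: always re-confirm via gethash.
  }
  int64_t nowSec = std::chrono::duration_cast<std::chrono::seconds>(
      std::chrono::system_clock::now().time_since_epoch()).count();
  return (nowSec - it->second) < aFreshnessGuaranteeSec;
}

For reference, the related urlclassifier.max-complete-age pref defaults to 45 minutes (CONFIRM_AGE_DEFAULT_SEC further down in this changeset); an unconfirmed match is what triggers a fresh gethash request.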
--- a/toolkit/components/url-classifier/Classifier.h
+++ b/toolkit/components/url-classifier/Classifier.h
@@ -147,17 +147,17 @@ private:
   // and on-disk data.
   void RemoveUpdateIntermediaries();
 
 #ifdef MOZ_SAFEBROWSING_DUMP_FAILED_UPDATES
   already_AddRefed<nsIFile> GetFailedUpdateDirectroy();
   nsresult DumpFailedUpdate();
 #endif
 
-  nsresult ScanStoreDir(nsIFile* aDirectory, nsTArray<nsCString>& aTables);
+  nsresult ScanStoreDir(nsTArray<nsCString>& aTables);
 
   nsresult UpdateHashStore(nsTArray<TableUpdate*>* aUpdates,
                            const nsACString& aTable);
 
   nsresult UpdateTableV4(nsTArray<TableUpdate*>* aUpdates,
                          const nsACString& aTable);
 
   nsresult UpdateCache(TableUpdate* aUpdates);
--- a/toolkit/components/url-classifier/Entries.h
+++ b/toolkit/components/url-classifier/Entries.h
@@ -313,50 +313,12 @@ WriteTArray(nsIOutputStream* aStream, ns
                         aArray.Length() * sizeof(T),
                         &written);
 }
 
 typedef nsClassHashtable<nsUint32HashKey, nsCString> PrefixStringMap;
 
 typedef nsDataHashtable<nsCStringHashKey, int64_t> TableFreshnessMap;
 
-typedef nsCStringHashKey VLHashPrefixString;
-typedef nsCStringHashKey FullHashString;
-
-typedef nsDataHashtable<FullHashString, int64_t> FullHashExpiryCache;
-
-struct CachedFullHashResponse {
-  int64_t negativeCacheExpirySec;
-
-  // Map contains all matches found in Fullhash response, this field might be empty.
-  FullHashExpiryCache fullHashes;
-
-  CachedFullHashResponse& operator=(const CachedFullHashResponse& aOther) {
-    negativeCacheExpirySec = aOther.negativeCacheExpirySec;
-
-    fullHashes.Clear();
-    for (auto iter = aOther.fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
-      fullHashes.Put(iter.Key(), iter.Data());
-    }
-
-    return *this;
-  }
-
-  bool operator==(const CachedFullHashResponse& aOther) const {
-    if (negativeCacheExpirySec != aOther.negativeCacheExpirySec ||
-        fullHashes.Count() != aOther.fullHashes.Count()) {
-      return false;
-    }
-    for (auto iter = fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
-      if (iter.Data() != aOther.fullHashes.Get(iter.Key())) {
-        return false;
-      }
-    }
-    return true;
-  }
-};
-
-typedef nsClassHashtable<VLHashPrefixString, CachedFullHashResponse> FullHashResponseMap;
-
 } // namespace safebrowsing
 } // namespace mozilla
 
 #endif // SBEntries_h__
--- a/toolkit/components/url-classifier/HashStore.cpp
+++ b/toolkit/components/url-classifier/HashStore.cpp
@@ -188,29 +188,16 @@ TableUpdateV4::NewRemovalIndices(const u
 }
 
 void
 TableUpdateV4::NewChecksum(const std::string& aChecksum)
 {
   mChecksum.Assign(aChecksum.data(), aChecksum.size());
 }
 
-nsresult
-TableUpdateV4::NewFullHashResponse(const nsACString& aPrefix,
-                                   CachedFullHashResponse& aResponse)
-{
-  CachedFullHashResponse* response =
-    mFullHashResponseMap.LookupOrAdd(aPrefix);
-  if (!response) {
-    return NS_ERROR_OUT_OF_MEMORY;
-  }
-  *response = aResponse;
-  return NS_OK;
-}
-
 HashStore::HashStore(const nsACString& aTableName,
                      const nsACString& aProvider,
                      nsIFile* aRootStoreDir)
   : mTableName(aTableName)
   , mInUpdate(false)
   , mFileSize(0)
 {
   nsresult rv = Classifier::GetPrivateStoreDirectory(aRootStoreDir,
--- a/toolkit/components/url-classifier/HashStore.h
+++ b/toolkit/components/url-classifier/HashStore.h
@@ -154,50 +154,42 @@ public:
   explicit TableUpdateV4(const nsACString& aTable)
     : TableUpdate(aTable)
     , mFullUpdate(false)
   {
   }
 
   bool Empty() const override
   {
-    return mPrefixesMap.IsEmpty() &&
-           mRemovalIndiceArray.IsEmpty() &&
-           mFullHashResponseMap.IsEmpty();
+    return mPrefixesMap.IsEmpty() && mRemovalIndiceArray.IsEmpty();
   }
 
   bool IsFullUpdate() const { return mFullUpdate; }
   PrefixStdStringMap& Prefixes() { return mPrefixesMap; }
   RemovalIndiceArray& RemovalIndices() { return mRemovalIndiceArray; }
   const nsACString& ClientState() const { return mClientState; }
   const nsACString& Checksum() const { return mChecksum; }
-  const FullHashResponseMap& FullHashResponse() const { return mFullHashResponseMap; }
 
   // For downcasting.
   static const int TAG = 4;
 
   void SetFullUpdate(bool aIsFullUpdate) { mFullUpdate = aIsFullUpdate; }
   void NewPrefixes(int32_t aSize, std::string& aPrefixes);
   void NewRemovalIndices(const uint32_t* aIndices, size_t aNumOfIndices);
   void SetNewClientState(const nsACString& aState) { mClientState = aState; }
   void NewChecksum(const std::string& aChecksum);
-  nsresult NewFullHashResponse(const nsACString& aPrefix,
-                               CachedFullHashResponse& aResponse);
 
 private:
   virtual int Tag() const override { return TAG; }
 
   bool mFullUpdate;
   PrefixStdStringMap mPrefixesMap;
   RemovalIndiceArray mRemovalIndiceArray;
   nsCString mClientState;
   nsCString mChecksum;
-
-  // This is used to store response from fullHashes.find.
-  FullHashResponseMap mFullHashResponseMap;
 };
 
 // There is one hash store per table.
 class HashStore {
 public:
   HashStore(const nsACString& aTableName,
             const nsACString& aProvider,
             nsIFile* aRootStoreFile);
--- a/toolkit/components/url-classifier/LookupCache.cpp
+++ b/toolkit/components/url-classifier/LookupCache.cpp
@@ -35,19 +35,16 @@
 // MOZ_LOG=UrlClassifierDbService:5
 extern mozilla::LazyLogModule gUrlClassifierDbServiceLog;
 #define LOG(args) MOZ_LOG(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug, args)
 #define LOG_ENABLED() MOZ_LOG_TEST(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug)
 
 namespace mozilla {
 namespace safebrowsing {
 
-const int CacheResultV2::VER = CacheResult::V2;
-const int CacheResultV4::VER = CacheResult::V4;
-
 const int LookupCacheV2::VER = 2;
 
 LookupCache::LookupCache(const nsACString& aTableName,
                          const nsACString& aProvider,
                          nsIFile* aRootStoreDir)
   : mPrimed(false)
   , mTableName(aTableName)
   , mProvider(aProvider)
@@ -92,16 +89,44 @@ LookupCache::UpdateRootDirHandle(nsIFile
     LOG(("Private store directory for %s is %s", mTableName.get(),
                                                  NS_ConvertUTF16toUTF8(path).get()));
   }
 
   return rv;
 }
 
 nsresult
+LookupCache::AddCompletionsToCache(AddCompleteArray& aAddCompletes)
+{
+  for (uint32_t i = 0; i < aAddCompletes.Length(); i++) {
+    if (mGetHashCache.BinaryIndexOf(aAddCompletes[i].CompleteHash()) == mGetHashCache.NoIndex) {
+      mGetHashCache.AppendElement(aAddCompletes[i].CompleteHash());
+    }
+  }
+  mGetHashCache.Sort();
+
+  return NS_OK;
+}
+
+#if defined(DEBUG)
+void
+LookupCache::DumpCache()
+{
+  if (!LOG_ENABLED())
+    return;
+
+  for (uint32_t i = 0; i < mGetHashCache.Length(); i++) {
+    nsAutoCString str;
+    mGetHashCache[i].ToHexString(str);
+    LOG(("Caches: %s", str.get()));
+  }
+}
+#endif
+
+nsresult
 LookupCache::WriteFile()
 {
   if (nsUrlClassifierDBService::ShutdownHasStarted()) {
     return NS_ERROR_ABORT;
   }
 
   nsCOMPtr<nsIFile> psFile;
   nsresult rv = mStoreDirectory->Clone(getter_AddRefs(psFile));
@@ -119,16 +144,22 @@ LookupCache::WriteFile()
 void
 LookupCache::ClearAll()
 {
   ClearCache();
   ClearPrefixes();
   mPrimed = false;
 }
 
+void
+LookupCache::ClearCache()
+{
+  mGetHashCache.Clear();
+}
+
 /* static */ bool
 LookupCache::IsCanonicalizedIP(const nsACString& aHost)
 {
   // The canonicalization process will have left IP addresses in dotted
   // decimal with no surprises.
   uint32_t i1, i2, i3, i4;
   char c;
   if (PR_sscanf(PromiseFlatCString(aHost).get(), "%u.%u.%u.%u%c",
@@ -362,22 +393,20 @@ void
 LookupCacheV2::ClearAll()
 {
   LookupCache::ClearAll();
   mUpdateCompletions.Clear();
 }
 
 nsresult
 LookupCacheV2::Has(const Completion& aCompletion,
-                   const TableFreshnessMap& aTableFreshness,
-                   uint32_t aFreshnessGuarantee,
                    bool* aHas, uint32_t* aMatchLength,
-                   bool* aConfirmed, bool* aFromCache)
+                   bool* aFromCache)
 {
-  *aHas = *aConfirmed = *aFromCache = false;
+  *aHas = *aFromCache = false;
   *aMatchLength = 0;
 
   uint32_t prefix = aCompletion.ToUint32();
 
   bool found;
   nsresult rv = mPrefixSet->Contains(prefix, &found);
   NS_ENSURE_SUCCESS(rv, rv);
 
@@ -389,30 +418,40 @@ LookupCacheV2::Has(const Completion& aCo
   }
 
   if ((mGetHashCache.BinaryIndexOf(aCompletion) != nsTArray<Completion>::NoIndex) ||
       (mUpdateCompletions.BinaryIndexOf(aCompletion) != nsTArray<Completion>::NoIndex)) {
     LOG(("Complete in %s", mTableName.get()));
     *aFromCache = true;
     *aHas = true;
     *aMatchLength = COMPLETE_SIZE;
-
-    int64_t ageSec; // in seconds
-    if (aTableFreshness.Get(mTableName, &ageSec)) {
-      int64_t nowSec = (PR_Now() / PR_USEC_PER_SEC);
-      MOZ_ASSERT(ageSec <= nowSec);
-
-      // Considered completion as unsafe if its table is up-to-date.
-      *aConfirmed = (nowSec - ageSec) < aFreshnessGuarantee;
-    }
   }
 
   return NS_OK;
 }
 
+void
+LookupCacheV2::IsHashEntryConfirmed(const Completion& aEntry,
+                                    const TableFreshnessMap& aTableFreshness,
+                                    uint32_t aFreshnessGuarantee,
+                                    bool* aConfirmed)
+{
+  int64_t age; // in seconds
+  bool found = aTableFreshness.Get(mTableName, &age);
+  if (!found) {
+    *aConfirmed = false;
+  } else {
+    int64_t now = (PR_Now() / PR_USEC_PER_SEC);
+    MOZ_ASSERT(age <= now);
+
+    // Considered completion as unsafe if its table is up-to-date.
+    *aConfirmed = (now - age) < aFreshnessGuarantee;
+  }
+}
+
 bool
 LookupCacheV2::IsEmpty()
 {
   bool isEmpty;
   mPrefixSet->IsEmpty(&isEmpty);
   return isEmpty;
 }
 
@@ -448,29 +487,16 @@ LookupCacheV2::GetPrefixes(FallibleTArra
     // This can happen if its a new table, so no error.
     LOG(("GetPrefixes from empty LookupCache"));
     return NS_OK;
   }
   return mPrefixSet->GetPrefixesNative(aAddPrefixes);
 }
 
 nsresult
-LookupCacheV2::AddCompletionsToCache(AddCompleteArray& aAddCompletes)
-{
-  for (uint32_t i = 0; i < aAddCompletes.Length(); i++) {
-    if (mGetHashCache.BinaryIndexOf(aAddCompletes[i].CompleteHash()) == mGetHashCache.NoIndex) {
-      mGetHashCache.AppendElement(aAddCompletes[i].CompleteHash());
-    }
-  }
-  mGetHashCache.Sort();
-
-  return NS_OK;
-}
-
-nsresult
 LookupCacheV2::ReadCompletions()
 {
   HashStore store(mTableName, mProvider, mRootStoreDirectory);
 
   nsresult rv = store.Open();
   NS_ENSURE_SUCCESS(rv, rv);
 
   mUpdateCompletions.Clear();
@@ -478,22 +504,16 @@ LookupCacheV2::ReadCompletions()
   const AddCompleteArray& addComplete = store.AddCompletes();
   for (uint32_t i = 0; i < addComplete.Length(); i++) {
     mUpdateCompletions.AppendElement(addComplete[i].complete);
   }
 
   return NS_OK;
 }
 
-void
-LookupCacheV2::ClearCache()
-{
-  mGetHashCache.Clear();
-}
-
 nsresult
 LookupCacheV2::ClearPrefixes()
 {
   return mPrefixSet->SetPrefixes(nullptr, 0);
 }
 
 nsresult
 LookupCacheV2::StoreToFile(nsIFile* aFile)
@@ -564,31 +584,16 @@ LookupCacheV2::ConstructPrefixSet(AddPre
 #endif
 
   mPrimed = true;
 
   return NS_OK;
 }
 
 #if defined(DEBUG)
-
-void
-LookupCacheV2::DumpCache()
-{
-  if (!LOG_ENABLED()) {
-    return;
-  }
-
-  for (uint32_t i = 0; i < mGetHashCache.Length(); i++) {
-    nsAutoCString str;
-    mGetHashCache[i].ToHexString(str);
-    LOG(("Caches: %s", str.get()));
-  }
-}
-
 void
 LookupCacheV2::DumpCompletions()
 {
   if (!LOG_ENABLED())
     return;
 
   for (uint32_t i = 0; i < mUpdateCompletions.Length(); i++) {
     nsAutoCString str;
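Editorial note (not part of the changeset): the LookupCache.cpp hunks above move the gethash completion cache (mGetHashCache) back into the LookupCache base class. AddCompletionsToCache() appends only hashes not already cached and then sorts the array, so Has() can probe it with BinaryIndexOf(). A standalone sketch of that sorted-array pattern, with std::vector standing in for nsTArray/CompletionArray and illustrative names:

#include <algorithm>
#include <array>
#include <vector>

using Completion = std::array<unsigned char, 32>;  // COMPLETE_SIZE bytes

struct GetHashCache {
  std::vector<Completion> mEntries;  // kept sorted, like mGetHashCache

  // Mirrors AddCompletionsToCache: append only unseen hashes, then sort so
  // later probes can use binary search.
  void Add(const std::vector<Completion>& aCompletes) {
    for (const Completion& c : aCompletes) {
      if (std::find(mEntries.begin(), mEntries.end(), c) == mEntries.end()) {
        mEntries.push_back(c);
      }
    }
    std::sort(mEntries.begin(), mEntries.end());
  }

  // Mirrors the BinaryIndexOf probe in Has().
  bool Contains(const Completion& aHash) const {
    return std::binary_search(mEntries.begin(), mEntries.end(), aHash);
  }
};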
--- a/toolkit/components/url-classifier/LookupCache.h
+++ b/toolkit/components/url-classifier/LookupCache.h
@@ -98,78 +98,28 @@ public:
   bool mProtocolV2;
 
   // This is only used by telemetry to record the match result.
   MatchResult mMatchResult;
 };
 
 typedef nsTArray<LookupResult> LookupResultArray;
 
-class CacheResult {
-public:
-  enum { V2, V4 };
-
-  virtual int Ver() const = 0;
-  virtual bool findCompletion(const Completion& aCompletion) const = 0;
-
-  virtual ~CacheResult() {}
-
-  template<typename T>
-  static T* Cast(CacheResult* aThat) {
-    return ((aThat && T::VER == aThat->Ver()) ?
-      reinterpret_cast<T*>(aThat) : nullptr);
-  }
+struct CacheResult {
+  AddComplete entry;
+  nsCString table;
 
-  nsCString table;
-};
-
-class CacheResultV2 final : public CacheResult
-{
-public:
-  static const int VER;
-
-  Completion completion;
-  uint32_t addChunk;
-
-  bool operator==(const CacheResultV2& aOther) const {
-    return table == aOther.table &&
-           completion == aOther.completion &&
-           addChunk == aOther.addChunk;
+  bool operator==(const CacheResult& aOther) const {
+    if (entry != aOther.entry) {
+      return false;
+    }
+    return table == aOther.table;
   }
-
-  bool findCompletion(const Completion& aCompletion) const override {
-    return completion == aCompletion;
-  }
-
-  virtual int Ver() const override { return VER; }
 };
-
-class CacheResultV4 final : public CacheResult
-{
-public:
-  static const int VER;
-
-  nsCString prefix;
-  CachedFullHashResponse response;
-
-  bool operator==(const CacheResultV4& aOther) const {
-    return prefix == aOther.prefix &&
-           response == aOther.response;
-  }
-
-  bool findCompletion(const Completion& aCompletion) const override {
-    nsDependentCSubstring completion(
-      reinterpret_cast<const char*>(aCompletion.buf), COMPLETE_SIZE);
-    return response.fullHashes.Contains(completion);
-  }
-
-  virtual int Ver() const override { return VER; }
-};
-
-typedef nsTArray<UniquePtr<CacheResult>> CacheResultArray;
+typedef nsTArray<CacheResult> CacheResultArray;
 
 class LookupCache {
 public:
   // Check for a canonicalized IP address.
   static bool IsCanonicalizedIP(const nsACString& aHost);
 
   // take a lookup string (www.hostname.com/path/to/resource.html) and
   // expand it into the set of fragments that should be searched for in an
@@ -190,41 +140,47 @@ public:
   virtual ~LookupCache() {}
 
   const nsCString &TableName() const { return mTableName; }
 
   // The directory handle where we operate will
   // be moved away when a backup is made.
   nsresult UpdateRootDirHandle(nsIFile* aRootStoreDirectory);
 
+  // This will Clear() the passed arrays when done.
+  nsresult AddCompletionsToCache(AddCompleteArray& aAddCompletes);
+
   // Write data stored in lookup cache to disk.
   nsresult WriteFile();
 
+  // Clear completions retrieved from gethash request.
+  void ClearCache();
+
   bool IsPrimed() const { return mPrimed; };
 
+#if DEBUG
+  void DumpCache();
+#endif
+
   virtual nsresult Open();
   virtual nsresult Init() = 0;
   virtual nsresult ClearPrefixes() = 0;
   virtual nsresult Has(const Completion& aCompletion,
-                       const TableFreshnessMap& aTableFreshness,
-                       uint32_t aFreshnessGuarantee,
                        bool* aHas, uint32_t* aMatchLength,
-                       bool* aConfirmed, bool* aFromCache) = 0;
+                       bool* aFromCache) = 0;
 
-  // Clear completions retrieved from gethash request.
-  virtual void ClearCache() = 0;
+  virtual void IsHashEntryConfirmed(const Completion& aEntry,
+                                    const TableFreshnessMap& aTableFreshness,
+                                    uint32_t aFreshnessGuarantee,
+                                    bool* aConfirmed) = 0;
 
   virtual bool IsEmpty() = 0;
 
   virtual void ClearAll();
 
-#if DEBUG
-  virtual void DumpCache() = 0;
-#endif
-
   template<typename T>
   static T* Cast(LookupCache* aThat) {
     return ((aThat && T::VER == aThat->Ver()) ? reinterpret_cast<T*>(aThat) : nullptr);
   }
 
 private:
   nsresult LoadPrefixSet();
 
@@ -236,52 +192,52 @@ private:
 
 protected:
   bool mPrimed;
   nsCString mTableName;
   nsCString mProvider;
   nsCOMPtr<nsIFile> mRootStoreDirectory;
   nsCOMPtr<nsIFile> mStoreDirectory;
 
+  // Full length hashes obtained in gethash request
+  CompletionArray mGetHashCache;
+
   // For gtest to inspect private members.
   friend class PerProviderDirectoryTestUtils;
 };
 
 class LookupCacheV2 final : public LookupCache
 {
 public:
   explicit LookupCacheV2(const nsACString& aTableName,
                          const nsACString& aProvider,
                          nsIFile* aStoreFile)
     : LookupCache(aTableName, aProvider, aStoreFile) {}
   ~LookupCacheV2() {}
 
   virtual nsresult Init() override;
   virtual nsresult Open() override;
-  virtual void ClearCache() override;
   virtual void ClearAll() override;
   virtual nsresult Has(const Completion& aCompletion,
-                       const TableFreshnessMap& aTableFreshness,
-                       uint32_t aFreshnessGuarantee,
                        bool* aHas, uint32_t* aMatchLength,
-                       bool* aConfirmed, bool* aFromCache) override;
+                       bool* aFromCache) override;
+
+  virtual void IsHashEntryConfirmed(const Completion& aEntry,
+                                    const TableFreshnessMap& aTableFreshness,
+                                    uint32_t aFreshnessGuarantee,
+                                    bool* aConfirmed) override;
 
   virtual bool IsEmpty() override;
 
   nsresult Build(AddPrefixArray& aAddPrefixes,
                  AddCompleteArray& aAddCompletes);
 
   nsresult GetPrefixes(FallibleTArray<uint32_t>& aAddPrefixes);
 
-  // This will Clear() the passed arrays when done.
-  nsresult AddCompletionsToCache(AddCompleteArray& aAddCompletes);
-
 #if DEBUG
-  virtual void DumpCache() override;
-
   void DumpCompletions();
 #endif
 
   static const int VER;
 
 protected:
   nsresult ReadCompletions();
 
@@ -297,17 +253,14 @@ private:
   // This will Clear() aAddPrefixes when done.
   nsresult ConstructPrefixSet(AddPrefixArray& aAddPrefixes);
 
   // Full length hashes obtained in update request
   CompletionArray mUpdateCompletions;
 
   // Set of prefixes known to be in the database
   RefPtr<nsUrlClassifierPrefixSet> mPrefixSet;
-
-  // Full length hashes obtained in gethash request
-  CompletionArray mGetHashCache;
 };
 
 } // namespace safebrowsing
 } // namespace mozilla
 
 #endif
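Editorial note (not part of the changeset): LookupCache.h keeps the version-tag downcast helper LookupCache::Cast, which both the V2 and V4 paths rely on; the removed CacheResult class used the same idiom before being flattened back into a plain struct. A minimal standalone sketch of the idiom, with simplified class names and static_cast in place of the tree's reinterpret_cast:

#include <iostream>

// Base class carries a version tag; Cast<T> succeeds only when the tag matches.
class LookupCacheBase {
public:
  virtual ~LookupCacheBase() {}
  virtual int Ver() const = 0;

  template <typename T>
  static T* Cast(LookupCacheBase* aThat) {
    return (aThat && T::VER == aThat->Ver()) ? static_cast<T*>(aThat) : nullptr;
  }
};

class CacheV2 final : public LookupCacheBase {
public:
  static constexpr int VER = 2;
  int Ver() const override { return VER; }
};

class CacheV4 final : public LookupCacheBase {
public:
  static constexpr int VER = 4;
  int Ver() const override { return VER; }
};

int main() {
  CacheV2 v2;
  LookupCacheBase* base = &v2;
  std::cout << (LookupCacheBase::Cast<CacheV2>(base) != nullptr) << "\n";  // prints 1
  std::cout << (LookupCacheBase::Cast<CacheV4>(base) != nullptr) << "\n";  // prints 0
}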
--- a/toolkit/components/url-classifier/LookupCacheV4.cpp
+++ b/toolkit/components/url-classifier/LookupCacheV4.cpp
@@ -75,107 +75,53 @@ LookupCacheV4::Init()
   nsresult rv = mVLPrefixSet->Init(mTableName);
   NS_ENSURE_SUCCESS(rv, rv);
 
   return NS_OK;
 }
 
 nsresult
 LookupCacheV4::Has(const Completion& aCompletion,
-                   const TableFreshnessMap& aTableFreshness,
-                   uint32_t aFreshnessGuarantee,
                    bool* aHas, uint32_t* aMatchLength,
-                   bool* aConfirmed, bool* aFromCache)
+                   bool* aFromCache)
 {
-  *aHas = *aConfirmed = *aFromCache = false;
+  *aHas = *aFromCache = false;
   *aMatchLength = 0;
 
   uint32_t length = 0;
   nsDependentCSubstring fullhash;
   fullhash.Rebind((const char *)aCompletion.buf, COMPLETE_SIZE);
 
   nsresult rv = mVLPrefixSet->Matches(fullhash, &length);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  MOZ_ASSERT(length == 0 || (length >= PREFIX_SIZE && length <= COMPLETE_SIZE));
-
   *aHas = length >= PREFIX_SIZE;
   *aMatchLength = length;
 
   if (LOG_ENABLED()) {
     uint32_t prefix = aCompletion.ToUint32();
     LOG(("Probe in V4 %s: %X, found %d, complete %d", mTableName.get(),
           prefix, *aHas, length == COMPLETE_SIZE));
   }
 
-  // Check if fullhash match any prefix in the local database
-  if (!(*aHas)) {
-    return NS_OK;
-  }
-
-  // We always send 4-bytes for completion(Bug 1323953) so the prefix used to
-  // lookup for cache should be 4-bytes too.
-  nsDependentCSubstring prefix(reinterpret_cast<const char*>(aCompletion.buf),
-                               PREFIX_SIZE);
-
-  // Check if prefix can be found in cache.
-  CachedFullHashResponse* fullHashResponse = mCache.Get(prefix);
-  if (!fullHashResponse) {
-    return NS_OK;
-  }
-
-  *aFromCache = true;
-
-  int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
-  int64_t expiryTime;
-
-  FullHashExpiryCache& fullHashes = fullHashResponse->fullHashes;
-  nsDependentCSubstring completion(
-    reinterpret_cast<const char*>(aCompletion.buf), COMPLETE_SIZE);
-
-  // Check if we can find the fullhash in positive cache
-  if (fullHashes.Get(completion, &expiryTime)) {
-    if (nowSec <= expiryTime) {
-      // Url is NOT safe.
-      *aConfirmed = true;
-      LOG(("Found a valid fullhash in the positive cache"));
-    } else {
-      // Trigger a gethash request in this case(aConfirmed is false).
-      LOG(("Found an expired fullhash in the positive cache"));
-
-      // Remove fullhash entry from the cache when the negative cache
-      // is also expired because whether or not the fullhash is cached
-      // locally, we will need to consult the server next time we
-      // lookup this hash. We may as well remove it from our cache.
-      if (fullHashResponse->negativeCacheExpirySec < expiryTime) {
-        fullHashes.Remove(completion);
-        if (fullHashes.Count() == 0 &&
-            fullHashResponse->negativeCacheExpirySec < nowSec) {
-          mCache.Remove(prefix);
-        }
-      }
-    }
-    return NS_OK;
-  }
-
-  // Check negative cache.
-  if (fullHashResponse->negativeCacheExpirySec >= nowSec) {
-    // Url is safe.
-    LOG(("Found a valid prefix in the negative cache"));
-    *aHas = false;
-  } else {
-    LOG(("Found an expired prefix in the negative cache"));
-    if (fullHashes.Count() == 0) {
-      mCache.Remove(prefix);
-    }
-  }
+  // TODO : Bug 1311935 - Implement v4 caching
 
   return NS_OK;
 }
 
+void
+LookupCacheV4::IsHashEntryConfirmed(const Completion& aEntry,
+                                    const TableFreshnessMap& aTableFreshness,
+                                    uint32_t aFreshnessGuarantee,
+                                    bool* aConfirmed)
+{
+  // TODO : Bug 1311935 - Implement v4 caching
+  *aConfirmed = true;
+}
+
 bool
 LookupCacheV4::IsEmpty()
 {
   bool isEmpty;
   mVLPrefixSet->IsEmpty(&isEmpty);
   return isEmpty;
 }
 
@@ -379,27 +325,16 @@ LookupCacheV4::ApplyUpdate(TableUpdateV4
     LOG(("Checksum mismatch after applying partial update"));
     return NS_ERROR_UC_UPDATE_CHECKSUM_MISMATCH;
   }
 
   return NS_OK;
 }
 
 nsresult
-LookupCacheV4::AddFullHashResponseToCache(const FullHashResponseMap& aResponseMap)
-{
-  for (auto iter = aResponseMap.ConstIter(); !iter.Done(); iter.Next()) {
-    CachedFullHashResponse* response = mCache.LookupOrAdd(iter.Key());
-    *response = *(iter.Data());
-  }
-
-  return NS_OK;
-}
-
-nsresult
 LookupCacheV4::InitCrypto(nsCOMPtr<nsICryptoHash>& aCrypto)
 {
   nsresult rv;
   aCrypto = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv);
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
   }
 
@@ -599,94 +534,16 @@ LookupCacheV4::LoadMetadata(nsACString& 
   if (NS_FAILED(rv)) {
     LOG(("Failed to read checksum."));
     return rv;
   }
 
   return rv;
 }
 
-void
-LookupCacheV4::ClearCache()
-{
-  mCache.Clear();
-}
-
-// This function remove cache entries whose negative cache time is expired.
-// It is possible that a cache entry whose positive cache time is not yet
-// expired but still being removed after calling this API. Right now we call
-// this on every update.
-void
-LookupCacheV4::InvalidateExpiredCacheEntry()
-{
-  int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
-
-  for (auto iter = mCache.Iter(); !iter.Done(); iter.Next()) {
-    CachedFullHashResponse* response = iter.Data();
-    if (response->negativeCacheExpirySec < nowSec) {
-      iter.Remove();
-    }
-  }
-}
-
-#if defined(DEBUG)
-static
-void CStringToHexString(const nsACString& aIn, nsACString& aOut)
-{
-  static const char* const lut = "0123456789ABCDEF";
-  // 32 bytes is the longest hash
-  size_t len = COMPLETE_SIZE;
-
-  aOut.SetCapacity(2 * len);
-  for (size_t i = 0; i < aIn.Length(); ++i) {
-    const char c = static_cast<const char>(aIn[i]);
-    aOut.Append(lut[(c >> 4) & 0x0F]);
-    aOut.Append(lut[c & 15]);
-  }
-}
-
-static
-nsCString GetFormattedTimeString(int64_t aCurTimeSec)
-{
-  PRExplodedTime pret;
-  PR_ExplodeTime(aCurTimeSec * PR_USEC_PER_SEC, PR_GMTParameters, &pret);
-
-  return nsPrintfCString(
-         "%04d-%02d-%02d %02d:%02d:%02d UTC",
-         pret.tm_year, pret.tm_month + 1, pret.tm_mday,
-         pret.tm_hour, pret.tm_min, pret.tm_sec);
-}
-
-void
-LookupCacheV4::DumpCache()
-{
-  if (!LOG_ENABLED()) {
-    return;
-  }
-
-  for (auto iter = mCache.ConstIter(); !iter.Done(); iter.Next()) {
-    nsAutoCString strPrefix;
-    CStringToHexString(iter.Key(), strPrefix);
-
-    CachedFullHashResponse* response = iter.Data();
-    LOG(("Caches prefix: %s, Expire time: %s",
-         strPrefix.get(),
-         GetFormattedTimeString(response->negativeCacheExpirySec).get()));
-
-    FullHashExpiryCache& fullHashes = response->fullHashes;
-    for (auto iter2 = fullHashes.ConstIter(); !iter2.Done(); iter2.Next()) {
-      nsAutoCString strFullhash;
-      CStringToHexString(iter2.Key(), strFullhash);
-      LOG(("  - %s, Expire time: %s", strFullhash.get(),
-           GetFormattedTimeString(iter2.Data()).get()));
-    }
-  }
-}
-#endif
-
 VLPrefixSet::VLPrefixSet(const PrefixStringMap& aMap)
   : mCount(0)
 {
   for (auto iter = aMap.ConstIter(); !iter.Done(); iter.Next()) {
     uint32_t size = iter.Key();
     mMap.Put(size, new PrefixString(*iter.Data(), size));
     mCount += iter.Data()->Length() / size;
   }
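Editorial note (not part of the changeset): the LookupCacheV4.cpp hunks above back out the V4 positive/negative caching. The removed Has() logic consulted a prefix-keyed FullHashResponseMap: an unexpired fullhash entry meant a confirmed (unsafe) match, an unexpired prefix entry with no matching fullhash meant a confirmed miss, and anything expired meant a new gethash request; InvalidateExpiredCacheEntry() evicted prefix entries whose negative expiry had passed, on every update. A standalone sketch of that decision rule, with standard containers and simplified names standing in for mCache and CachedFullHashResponse:

#include <cstdint>
#include <map>
#include <string>

struct CachedResponse {
  int64_t negativeExpirySec = 0;              // prefix-level ("negative") expiry
  std::map<std::string, int64_t> fullHashes;  // fullhash -> positive expiry
};

enum class CacheDecision { ConfirmedUnsafe, ConfirmedSafe, NeedsGethash };

CacheDecision Classify(const std::map<std::string, CachedResponse>& aCache,
                       const std::string& aPrefix,
                       const std::string& aFullHash,
                       int64_t aNowSec)
{
  auto entry = aCache.find(aPrefix);
  if (entry == aCache.end()) {
    return CacheDecision::NeedsGethash;  // no cached response for this prefix
  }
  auto hit = entry->second.fullHashes.find(aFullHash);
  if (hit != entry->second.fullHashes.end()) {
    // Positive cache: an unexpired fullhash entry means the URL is known unsafe.
    return aNowSec <= hit->second ? CacheDecision::ConfirmedUnsafe
                                  : CacheDecision::NeedsGethash;
  }
  // Negative cache: an unexpired prefix entry with no matching fullhash means
  // the URL is known safe; otherwise ask the server again.
  return aNowSec <= entry->second.negativeExpirySec ? CacheDecision::ConfirmedSafe
                                                    : CacheDecision::NeedsGethash;
}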
--- a/toolkit/components/url-classifier/LookupCacheV4.h
+++ b/toolkit/components/url-classifier/LookupCacheV4.h
@@ -20,61 +20,52 @@ public:
   explicit LookupCacheV4(const nsACString& aTableName,
                          const nsACString& aProvider,
                          nsIFile* aStoreFile)
     : LookupCache(aTableName, aProvider, aStoreFile) {}
   ~LookupCacheV4() {}
 
   virtual nsresult Init() override;
   virtual nsresult Has(const Completion& aCompletion,
-                       const TableFreshnessMap& aTableFreshness,
-                       uint32_t aFreshnessGuarantee,
                        bool* aHas, uint32_t* aMatchLength,
-                       bool* aConfirmed, bool* aFromCache) override;
+                       bool* aFromCache) override;
 
-  virtual void ClearCache() override;
-
-#if DEBUG
-  virtual void DumpCache() override;
-#endif
+  virtual void IsHashEntryConfirmed(const Completion& aEntry,
+                                    const TableFreshnessMap& aTableFreshness,
+                                    uint32_t aFreshnessGuarantee,
+                                    bool* aConfirmed) override;
 
   virtual bool IsEmpty() override;
 
   nsresult Build(PrefixStringMap& aPrefixMap);
 
   nsresult GetPrefixes(PrefixStringMap& aPrefixMap);
   nsresult GetFixedLengthPrefixes(FallibleTArray<uint32_t>& aPrefixes);
 
   // ApplyUpdate will merge data stored in aTableUpdate with prefixes in aInputMap.
   nsresult ApplyUpdate(TableUpdateV4* aTableUpdate,
                        PrefixStringMap& aInputMap,
                        PrefixStringMap& aOutputMap);
 
-  nsresult AddFullHashResponseToCache(const FullHashResponseMap& aResponseMap);
-
   nsresult WriteMetadata(TableUpdateV4* aTableUpdate);
   nsresult LoadMetadata(nsACString& aState, nsACString& aChecksum);
 
-  void InvalidateExpiredCacheEntry();
-
   static const int VER;
 
 protected:
   virtual nsresult ClearPrefixes() override;
   virtual nsresult StoreToFile(nsIFile* aFile) override;
   virtual nsresult LoadFromFile(nsIFile* aFile) override;
   virtual size_t SizeOfPrefixSet() override;
 
 private:
   virtual int Ver() const override { return VER; }
 
   nsresult InitCrypto(nsCOMPtr<nsICryptoHash>& aCrypto);
   nsresult VerifyChecksum(const nsACString& aChecksum);
 
   RefPtr<VariableLengthPrefixSet> mVLPrefixSet;
-
-  FullHashResponseMap mCache;
 };
 
 } // namespace safebrowsing
 } // namespace mozilla
 
 #endif
--- a/toolkit/components/url-classifier/VariableLengthPrefixSet.cpp
+++ b/toolkit/components/url-classifier/VariableLengthPrefixSet.cpp
@@ -183,17 +183,16 @@ VariableLengthPrefixSet::Matches(const n
   if (found) {
     *aLength = PREFIX_SIZE_FIXED;
     return NS_OK;
   }
 
   for (auto iter = mVLPrefixSet.ConstIter(); !iter.Done(); iter.Next()) {
     if (BinarySearch(aFullHash, *iter.Data(), iter.Key())) {
       *aLength = iter.Key();
-      MOZ_ASSERT(*aLength > 4);
       return NS_OK;
     }
   }
 
   return NS_OK;
 }
 
 nsresult
--- a/toolkit/components/url-classifier/nsIUrlClassifierHashCompleter.idl
+++ b/toolkit/components/url-classifier/nsIUrlClassifierHashCompleter.idl
@@ -1,70 +1,35 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsISupports.idl"
 
-interface nsIArray;
-
-/**
- * This interface contains feilds in Matches object of FullHashResponse(V4).
- * Reference from:
- * https://developers.google.com/safe-browsing/v4/update-api#http-post-response_2
- */
-[scriptable, uuid(aabeb50e-d9f7-418e-9469-2cd9608958c0)]
-interface nsIFullHashMatch : nsISupports
-{
-  readonly attribute ACString tableName;
-
-  readonly attribute ACString fullHash;
-
-  readonly attribute uint32_t cacheDuration;
-};
-
 /**
  * This interface is implemented by nsIUrlClassifierHashCompleter clients.
  */
 [scriptable, uuid(da16de40-df26-414d-bde7-c4faf4504868)]
 interface nsIUrlClassifierHashCompleterCallback : nsISupports
 {
   /**
    * A complete hash has been found that matches the partial hash.
    * This method may be called 0-n times for a given
    * nsIUrlClassifierCompleter::complete() call.
    *
    * @param hash
-   *        The 256-bit hash that was discovered.
+   *        The 128-bit hash that was discovered.
    * @param table
    *        The name of the table that this hash belongs to.
    * @param chunkId
    *        The database chunk that this hash belongs to.
    */
-  void completionV2(in ACString hash,
-                    in ACString table,
-                    in uint32_t chunkId);
-
-  /**
-   * This will be called when a fullhash response is received and parsed
-   * no matter if any full hash has been found.
-   *
-   * @param partialHash
-   *        The hash that was sent for completion.
-   * @param table
-   *        The name of the table that this hash belongs to.
-   * @param negativeCacheDuration
-   *        The negative cache duration in millisecond.
-   * @param fullHashes
-   *        Array of fullhashes that match the prefix.
-   */
-  void completionV4(in ACString partialHash,
-                    in ACString table,
-                    in uint32_t negativeCacheDuration,
-                    in nsIArray fullHashes);
+  void completion(in ACString hash,
+                  in ACString table,
+                  in uint32_t chunkId);
 
   /**
    * The completion is complete.  This method is called once per
    * nsIUrlClassifierCompleter::complete() call, after all completion()
    * calls are finished.
    *
    * @param status
    *        NS_OK if the request completed successfully, or an error code.
--- a/toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
+++ b/toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
@@ -1,17 +1,16 @@
 //* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsAutoPtr.h"
 #include "nsCOMPtr.h"
 #include "nsAppDirectoryServiceDefs.h"
-#include "nsArrayUtils.h"
 #include "nsCRT.h"
 #include "nsICryptoHash.h"
 #include "nsICryptoHMAC.h"
 #include "nsIDirectoryService.h"
 #include "nsIKeyModule.h"
 #include "nsIObserverService.h"
 #include "nsIPermissionManager.h"
 #include "nsIPrefBranch.h"
@@ -118,19 +117,16 @@ LazyLogModule gUrlClassifierDbServiceLog
 #define BLOCKED_TABLE_PREF              "urlclassifier.blockedTable"
 #define DOWNLOAD_BLOCK_TABLE_PREF       "urlclassifier.downloadBlockTable"
 #define DOWNLOAD_ALLOW_TABLE_PREF       "urlclassifier.downloadAllowTable"
 #define DISALLOW_COMPLETION_TABLE_PREF  "urlclassifier.disallow_completions"
 
 #define CONFIRM_AGE_PREF        "urlclassifier.max-complete-age"
 #define CONFIRM_AGE_DEFAULT_SEC (45 * 60)
 
-// 30 minutes as the maximum negative cache duration.
-#define MAXIMUM_NEGATIVE_CACHE_DURATION_SEC (30 * 60 * 1000)
-
 // TODO: The following two prefs are to be removed after we
 //       roll out full v4 hash completion. See Bug 1331534.
 #define TAKE_V4_COMPLETION_RESULT_PREF    "browser.safebrowsing.temporary.take_v4_completion_result"
 #define TAKE_V4_COMPLETION_RESULT_DEFAULT false
 
 class nsUrlClassifierDBServiceWorker;
 
 // Singleton instance.
@@ -837,121 +833,80 @@ nsUrlClassifierDBServiceWorker::CloseDb(
 nsresult
 nsUrlClassifierDBServiceWorker::CacheCompletions(CacheResultArray *results)
 {
   if (gShuttingDownThread) {
     return NS_ERROR_ABORT;
   }
 
   LOG(("nsUrlClassifierDBServiceWorker::CacheCompletions [%p]", this));
-  if (!mClassifier) {
+  if (!mClassifier)
     return NS_OK;
-  }
 
   // Ownership is transferred in to us
   nsAutoPtr<CacheResultArray> resultsPtr(results);
 
-  if (resultsPtr->Length() == 0) {
-    return NS_OK;
-  }
-
-  if (IsSameAsLastResults(*resultsPtr)) {
+  if (mLastResults == *resultsPtr) {
     LOG(("Skipping completions that have just been cached already."));
     return NS_OK;
   }
 
-  nsAutoPtr<ProtocolParser> pParse;
-  pParse = resultsPtr->ElementAt(0)->Ver() == CacheResult::V2 ?
-             static_cast<ProtocolParser*>(new ProtocolParserV2()) :
-             static_cast<ProtocolParser*>(new ProtocolParserProtobuf());
+  nsAutoPtr<ProtocolParserV2> pParse(new ProtocolParserV2());
+  nsTArray<TableUpdate*> updates;
 
   // Only cache results for tables that we have, don't take
   // in tables we might accidentally have hit during a completion.
   // This happens due to goog vs googpub lists existing.
   nsTArray<nsCString> tables;
   nsresult rv = mClassifier->ActiveTables(tables);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  nsTArray<TableUpdate*> updates;
-
   for (uint32_t i = 0; i < resultsPtr->Length(); i++) {
     bool activeTable = false;
-    CacheResult* result = resultsPtr->ElementAt(i).get();
-
     for (uint32_t table = 0; table < tables.Length(); table++) {
-      if (tables[table].Equals(result->table)) {
+      if (tables[table].Equals(resultsPtr->ElementAt(i).table)) {
         activeTable = true;
         break;
       }
     }
     if (activeTable) {
-      TableUpdate* tu = pParse->GetTableUpdate(result->table);
+      TableUpdateV2* tuV2 = TableUpdate::Cast<TableUpdateV2>(
+        pParse->GetTableUpdate(resultsPtr->ElementAt(i).table));
 
-      rv = CacheResultToTableUpdate(result, tu);
+      // Ignore V4 for now.
+      if (!tuV2) {
+        continue;
+      }
+
+      LOG(("CacheCompletion Addchunk %d hash %X", resultsPtr->ElementAt(i).entry.addChunk,
+           resultsPtr->ElementAt(i).entry.ToUint32()));
+      rv = tuV2->NewAddComplete(resultsPtr->ElementAt(i).entry.addChunk,
+                                resultsPtr->ElementAt(i).entry.complete);
       if (NS_FAILED(rv)) {
         // We can bail without leaking here because ForgetTableUpdates
         // hasn't been called yet.
         return rv;
       }
-      updates.AppendElement(tu);
+      rv = tuV2->NewAddChunk(resultsPtr->ElementAt(i).entry.addChunk);
+      if (NS_FAILED(rv)) {
+        return rv;
+      }
+      updates.AppendElement(tuV2);
       pParse->ForgetTableUpdates();
     } else {
       LOG(("Completion received, but table is not active, so not caching."));
     }
    }
 
   mClassifier->ApplyFullHashes(&updates);
-  mLastResults = Move(resultsPtr);
+  mLastResults = *resultsPtr;
   return NS_OK;
 }
 
 nsresult
-nsUrlClassifierDBServiceWorker::CacheResultToTableUpdate(CacheResult* aCacheResult,
-                                                         TableUpdate* aUpdate)
-{
-  auto tuV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate);
-  if (tuV2) {
-    auto result = CacheResult::Cast<CacheResultV2>(aCacheResult);
-    MOZ_ASSERT(result);
-
-    LOG(("CacheCompletion hash %X, Addchunk %d", result->completion.ToUint32(),
-         result->addChunk));
-
-    nsresult rv = tuV2->NewAddComplete(result->addChunk, result->completion);
-    if (NS_FAILED(rv)) {
-      return rv;
-    }
-    rv = tuV2->NewAddChunk(result->addChunk);
-    return rv;
-  }
-
-  auto tuV4 = TableUpdate::Cast<TableUpdateV4>(aUpdate);
-  if (tuV4) {
-    auto result = CacheResult::Cast<CacheResultV4>(aCacheResult);
-    MOZ_ASSERT(result);
-
-    if (LOG_ENABLED()) {
-      const FullHashExpiryCache& fullHashes = result->response.fullHashes;
-      for (auto iter = fullHashes.ConstIter(); !iter.Done(); iter.Next()) {
-        Completion completion;
-        completion.Assign(iter.Key());
-        LOG(("CacheCompletion(v4) hash %X, CacheExpireTime %" PRId64,
-             completion.ToUint32(), iter.Data()));
-      }
-    }
-
-    tuV4->NewFullHashResponse(result->prefix, result->response);
-    return NS_OK;
-  }
-
-  // tableUpdate object should be either v2 or v4.
-  return NS_ERROR_FAILURE;
-}
-
-nsresult
 nsUrlClassifierDBServiceWorker::CacheMisses(PrefixArray *results)
 {
   LOG(("nsUrlClassifierDBServiceWorker::CacheMisses [%p] %" PRIuSIZE,
        this, results->Length()));
 
   // Ownership is transferred in to us
   nsAutoPtr<PrefixArray> resultsPtr(results);
 
@@ -1002,49 +957,20 @@ nsUrlClassifierDBServiceWorker::SetLastU
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsUrlClassifierDBServiceWorker::ClearLastResults()
 {
   MOZ_ASSERT(!NS_IsMainThread(), "Must be on the background thread");
-  if (mLastResults) {
-    mLastResults->Clear();
-  }
+  mLastResults.Clear();
   return NS_OK;
 }
 
-bool
-nsUrlClassifierDBServiceWorker::IsSameAsLastResults(CacheResultArray& aResult)
-{
-  if (!mLastResults || mLastResults->Length() != aResult.Length()) {
-    return false;
-  }
-
-  bool equal = true;
-  for (uint32_t i = 0; i < mLastResults->Length() && equal; i++) {
-    CacheResult* lhs = mLastResults->ElementAt(i).get();
-    CacheResult* rhs = aResult[i].get();
-
-    if (lhs->Ver() != rhs->Ver()) {
-      return false;
-    }
-
-    if (lhs->Ver() == CacheResult::V2) {
-      equal = *(CacheResult::Cast<CacheResultV2>(lhs)) ==
-              *(CacheResult::Cast<CacheResultV2>(rhs));
-    } else if (lhs->Ver() == CacheResult::V4) {
-      equal = *(CacheResult::Cast<CacheResultV4>(lhs)) ==
-              *(CacheResult::Cast<CacheResultV4>(rhs));
-    }
-  }
-
-  return equal;
-}
 
 // -------------------------------------------------------------------------
 // nsUrlClassifierLookupCallback
 //
 // This class takes the results of a lookup found on the worker thread
 // and handles any necessary partial hash expansions before calling
 // the client callback.
 
@@ -1063,17 +989,16 @@ public:
     , mPendingCompletions(0)
     , mCallback(c)
     {}
 
 private:
   ~nsUrlClassifierLookupCallback();
 
   nsresult HandleResults();
-  nsresult ProcessComplete(CacheResult* aCacheResult);
 
   RefPtr<nsUrlClassifierDBService> mDBService;
   nsAutoPtr<LookupResultArray> mResults;
 
   // Completed results to send back to the worker for caching.
   nsAutoPtr<CacheResultArray> mCacheResults;
 
   uint32_t mPendingCompletions;
@@ -1179,107 +1104,49 @@ nsUrlClassifierLookupCallback::Completio
   if (mPendingCompletions == 0) {
     HandleResults();
   }
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsUrlClassifierLookupCallback::CompletionV2(const nsACString& aCompleteHash,
-                                            const nsACString& aTableName,
-                                            uint32_t aChunkId)
+nsUrlClassifierLookupCallback::Completion(const nsACString& completeHash,
+                                          const nsACString& tableName,
+                                          uint32_t chunkId)
 {
   LOG(("nsUrlClassifierLookupCallback::Completion [%p, %s, %d]",
-       this, PromiseFlatCString(aTableName).get(), aChunkId));
-
-  MOZ_ASSERT(!StringEndsWith(aTableName, NS_LITERAL_CSTRING("-proto")));
-
-  auto result = new CacheResultV2;
-
-  result->table = aTableName;
-  result->completion.Assign(aCompleteHash);
-  result->addChunk = aChunkId;
-
-  return ProcessComplete(result);
-}
-
-NS_IMETHODIMP
-nsUrlClassifierLookupCallback::CompletionV4(const nsACString& aPartialHash,
-                                            const nsACString& aTableName,
-                                            uint32_t aNegativeCacheDuration,
-                                            nsIArray* aFullHashes)
-{
-  LOG(("nsUrlClassifierLookupCallback::CompletionV4 [%p, %s, %d]",
-       this, PromiseFlatCString(aTableName).get(), aNegativeCacheDuration));
-
-  MOZ_ASSERT(StringEndsWith(aTableName, NS_LITERAL_CSTRING("-proto")));
-
-  if(!aFullHashes) {
-    return NS_ERROR_INVALID_ARG;
-  }
-
-  if (aNegativeCacheDuration > MAXIMUM_NEGATIVE_CACHE_DURATION_SEC) {
-    LOG(("Negative cache duration too large, clamping it down to"
-         "a reasonable value."));
-    aNegativeCacheDuration = MAXIMUM_NEGATIVE_CACHE_DURATION_SEC;
-  }
+       this, PromiseFlatCString(tableName).get(), chunkId));
 
-  auto result = new CacheResultV4;
-
-  int64_t nowSec = PR_Now() / PR_USEC_PER_SEC;
-
-  result->table = aTableName;
-  result->prefix = aPartialHash;
-  result->response.negativeCacheExpirySec = nowSec + aNegativeCacheDuration;
-
-  // Fill in positive cache entries.
-  uint32_t fullHashCount = 0;
-  nsresult rv = aFullHashes->GetLength(&fullHashCount);
-  if (NS_FAILED(rv)) {
-    return rv;
-  }
+  mozilla::safebrowsing::Completion hash;
+  hash.Assign(completeHash);
 
-  for (uint32_t i = 0; i < fullHashCount; i++) {
-    nsCOMPtr<nsIFullHashMatch> match = do_QueryElementAt(aFullHashes, i);
-
-    nsCString fullHash;
-    match->GetFullHash(fullHash);
-
-    uint32_t duration;
-    match->GetCacheDuration(&duration);
-
-    result->response.fullHashes.Put(fullHash, nowSec + duration);
-  }
-
-  return ProcessComplete(result);
-}
-
-nsresult
-nsUrlClassifierLookupCallback::ProcessComplete(CacheResult* aCacheResult)
-{
   // Send this completion to the store for caching.
   if (!mCacheResults) {
     mCacheResults = new CacheResultArray();
-    if (!mCacheResults) {
+    if (!mCacheResults)
       return NS_ERROR_OUT_OF_MEMORY;
-    }
   }
 
+  CacheResult result;
+  result.entry.addChunk = chunkId;
+  result.entry.complete = hash;
+  result.table = tableName;
+
   // OK if this fails, we just won't cache the item.
-  mCacheResults->AppendElement(aCacheResult);
+  mCacheResults->AppendElement(result);
 
   // Check if this matched any of our results.
   for (uint32_t i = 0; i < mResults->Length(); i++) {
     LookupResult& result = mResults->ElementAt(i);
 
     // Now, see if it verifies a lookup
     if (!result.mNoise
-        && result.mTableName.Equals(aCacheResult->table)
-        && aCacheResult->findCompletion(result.CompleteHash())) {
+        && result.CompleteHash() == hash
+        && result.mTableName.Equals(tableName)) {
       result.mProtocolConfirmed = true;
     }
   }
 
   return NS_OK;
 }
 
 
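Editorial note (not part of the changeset): the nsUrlClassifierDBService.cpp hunks above collapse the split CompletionV2/CompletionV4 callbacks back into a single Completion(). The removed CompletionV4 path clamped an oversized negative cache duration and converted the per-prefix and per-fullhash durations from a fullHashes.find response into absolute expiry times before handing them to the worker for caching. A standalone sketch of that conversion, with simplified names; the 30-minute cap is an assumed value mirroring the intent of the removed MAXIMUM_NEGATIVE_CACHE_DURATION_SEC:

#include <algorithm>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

struct FullHashMatch {
  std::string fullHash;
  uint32_t cacheDurationSec;  // per-hash positive cache duration
};

struct CachedResponse {
  int64_t negativeExpirySec = 0;
  std::map<std::string, int64_t> fullHashes;
};

constexpr uint32_t MAX_NEGATIVE_CACHE_DURATION_SEC = 30 * 60;  // assumed cap

CachedResponse BuildCachedResponse(int64_t aNowSec,
                                   uint32_t aNegativeCacheDurationSec,
                                   const std::vector<FullHashMatch>& aMatches)
{
  CachedResponse response;
  // Clamp the negative cache duration, then store it as an absolute expiry.
  uint32_t negDuration =
      std::min(aNegativeCacheDurationSec, MAX_NEGATIVE_CACHE_DURATION_SEC);
  response.negativeExpirySec = aNowSec + negDuration;

  // Each matched fullhash carries its own positive cache duration.
  for (const FullHashMatch& match : aMatches) {
    response.fullHashes[match.fullHash] = aNowSec + match.cacheDurationSec;
  }
  return response;
}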
--- a/toolkit/components/url-classifier/nsUrlClassifierDBService.h
+++ b/toolkit/components/url-classifier/nsUrlClassifierDBService.h
@@ -213,21 +213,16 @@ private:
                     const nsACString& tables,
                     nsIUrlClassifierLookupCallback* c);
 
   nsresult AddNoise(const Prefix aPrefix,
                     const nsCString tableName,
                     uint32_t aCount,
                     LookupResultArray& results);
 
-  nsresult CacheResultToTableUpdate(CacheResult* aCacheResult,
-                                    TableUpdate* aUpdate);
-
-  bool IsSameAsLastResults(CacheResultArray& aResult);
-
   // Can only be used on the background thread
   nsCOMPtr<nsICryptoHash> mCryptoHash;
 
   nsAutoPtr<mozilla::safebrowsing::Classifier> mClassifier;
   // The class that actually parses the update chunks.
   nsAutoPtr<ProtocolParser> mProtocolParser;
 
   // Directory where to store the SB databases.
@@ -239,17 +234,17 @@ private:
 
   uint32_t mUpdateWaitSec;
 
   // Entries that cannot be completed. We expect them to die at
   // the next update
   PrefixArray mMissCache;
 
   // Stores the last results that triggered a table update.
-  nsAutoPtr<CacheResultArray> mLastResults;
+  CacheResultArray mLastResults;
 
   nsresult mUpdateStatus;
   nsTArray<nsCString> mUpdateTables;
 
   nsCOMPtr<nsIUrlClassifierUpdateObserver> mUpdateObserver;
   bool mInStream;
 
   // The number of noise entries to add to the set of lookup results.
--- a/toolkit/components/url-classifier/nsUrlClassifierHashCompleter.js
+++ b/toolkit/components/url-classifier/nsUrlClassifierHashCompleter.js
@@ -144,30 +144,16 @@ function httpStatusToBucket(httpStatus) 
     statusBucket = 14;
     break;
   default:
     statusBucket = 15;
   };
   return statusBucket;
 }
 
-function FullHashMatch(table, hash, duration) {
-  this.tableName = table;
-  this.fullHash = hash;
-  this.cacheDuration = duration;
-}
-
-FullHashMatch.prototype = {
-  QueryInterface: XPCOMUtils.generateQI([Ci.nsIFullHashMatch]),
-
-  tableName : null,
-  fullHash : null,
-  cacheDuration : null,
-};
-
 function HashCompleter() {
   // The current HashCompleterRequest in flight. Once it is started, it is set
   // to null. It may be used by multiple calls to |complete| in succession to
   // avoid creating multiple requests to the same gethash URL.
   this._currentRequest = null;
   // A map of gethashUrls to HashCompleterRequests that haven't yet begun.
   this._pendingRequests = {};
 
@@ -324,18 +310,17 @@ HashCompleterRequest.prototype = {
                                          Ci.nsISupports]),
 
   // This is called by the HashCompleter to add a hash and callback to the
   // HashCompleterRequest. It must be called before calling |begin|.
   add: function HCR_add(aPartialHash, aCallback, aTableName) {
     this._requests.push({
       partialHash: aPartialHash,
       callback: aCallback,
-      tableName: aTableName,
-      response: { matches:[] },
+      responses: []
     });
 
     if (aTableName) {
       let isTableNameV4 = aTableName.endsWith('-proto');
       if (0 === this.tableNames.size) {
         // Decide if this request is v4 by the first added partial hash.
         this.isV4 = isTableNameV4;
       } else if (this.isV4 !== isTableNameV4) {
@@ -526,17 +511,17 @@ HashCompleterRequest.prototype = {
 
     let uploadChannel = this._channel.QueryInterface(Ci.nsIUploadChannel);
     uploadChannel.setUploadStream(inputStream, "text/plain", -1);
 
     let httpChannel = this._channel.QueryInterface(Ci.nsIHttpChannel);
     httpChannel.requestMethod = "POST";
   },
 
-  // Parses the response body and eventually adds items to the |response.matches| array
+  // Parses the response body and eventually adds items to the |responses| array
   // for elements of |this._requests|.
   handleResponse: function HCR_handleResponse() {
     if (this._response == "") {
       return;
     }
 
     if (this.isV4) {
       return this.handleResponseV4();
@@ -547,18 +532,16 @@ HashCompleterRequest.prototype = {
     let length = this._response.length;
     while (start != length) {
       start = this.handleTable(start);
     }
   },
 
   handleResponseV4: function HCR_handleResponseV4() {
     let callback = {
-      // onCompleteHashFound will be called for each fullhash found in
-      // FullHashResponse.
       onCompleteHashFound : (aCompleteHash,
                              aTableNames,
                              aPerHashCacheDuration) => {
         log("V4 fullhash response complete hash found callback: " +
             JSON.stringify(aCompleteHash) + ", " +
             aTableNames + ", CacheDuration(" + aPerHashCacheDuration + ")");
 
         // Filter out table names that we didn't request.
@@ -568,51 +551,39 @@ HashCompleterRequest.prototype = {
         if (0 === filteredTables.length) {
           log("ERROR: Got complete hash which is from unknown table.");
           return;
         }
         if (filteredTables.length > 1) {
           log("WARNING: Got complete hash which has ambigious threat type.");
         }
 
-        this.handleItem({
-          completeHash: aCompleteHash,
-          tableName: filteredTables[0],
-          cacheDuration: aPerHashCacheDuration
-        });
+        this.handleItem(aCompleteHash, filteredTables[0], 0);
+
+        // TODO: Bug 1311935 - Implement v4 cache.
       },
 
-      // onResponseParsed will be called whether or not there is a match in the
-      // FullHashResponse; the callback is mainly used to pass the negative cache
-      // duration and the minimum wait duration.
       onResponseParsed : (aMinWaitDuration,
                           aNegCacheDuration) => {
         log("V4 fullhash response parsed callback: " +
             "MinWaitDuration(" + aMinWaitDuration + "), " +
             "NegativeCacheDuration(" + aNegCacheDuration + ")");
 
         let minWaitDuration = aMinWaitDuration;
 
         if (aMinWaitDuration > MIN_WAIT_DURATION_MAX_VALUE) {
-          log("WARNING: Minimum wait duration too large, clamping it down " +
-              "to a reasonable value.");
           minWaitDuration = MIN_WAIT_DURATION_MAX_VALUE;
         } else if (aMinWaitDuration < 0) {
-          log("WARNING: Minimum wait duration is negative, reset it to 0");
           minWaitDuration = 0;
         }
 
         this._completer._nextGethashTimeMs[this.gethashUrl] =
           Date.now() + minWaitDuration;
 
-        // A fullhash request may contain more than one prefix, so the negative
-        // cache duration should be set for all the prefixes in the request.
-        this._requests.forEach(request => {
-          request.response.negCacheDuration = aNegCacheDuration;
-        });
+        // TODO: Bug 1311935 - Implement v4 cache.
       },
     };
 
     gUrlUtil.parseFindFullHashResponseV4(this._response, callback);
   },
 
   // This parses a table entry in the response body and calls |handleItem|
   // for each complete hash in the table entry.
@@ -639,68 +610,53 @@ HashCompleterRequest.prototype = {
     if (dataLength % COMPLETE_LENGTH != 0 ||
         dataLength == 0 ||
         dataLength > body.length - (newlineIndex + 1)) {
       throw errorWithStack();
     }
 
     let data = body.substr(newlineIndex + 1, dataLength);
     for (let i = 0; i < (dataLength / COMPLETE_LENGTH); i++) {
-      this.handleItem({
-        completeHash: data.substr(i * COMPLETE_LENGTH, COMPLETE_LENGTH),
-        tableName: list,
-        chunkId: addChunk
-      });
+      this.handleItem(data.substr(i * COMPLETE_LENGTH, COMPLETE_LENGTH), list,
+                      addChunk);
     }
 
     return aStart + newlineIndex + 1 + dataLength;
   },
 
   // This adds a complete hash to any entry in |this._requests| that matches
   // the hash.
-  handleItem: function HCR_handleItem(aData) {
+  handleItem: function HCR_handleItem(aData, aTableName, aChunkId) {
     for (let i = 0; i < this._requests.length; i++) {
       let request = this._requests[i];
-      if (aData.completeHash.startsWith(request.partialHash)) {
-        request.response.matches.push(aData);
+      if (aData.startsWith(request.partialHash)) {
+        request.responses.push({
+          completeHash: aData,
+          tableName: aTableName,
+          chunkId: aChunkId,
+        });
       }
     }
   },
 
   // notifySuccess and notifyFailure are used to alert the callbacks with
   // results. notifySuccess makes |completion| and |completionFinished| calls
   // while notifyFailure only makes a |completionFinished| call with the error
   // code.
   notifySuccess: function HCR_notifySuccess() {
-    // V2 completion handler
-    let completionV2 = (req) => {
-      req.response.matches.forEach((m) => {
-        req.callback.completionV2(m.completeHash, m.tableName, m.chunkId);
-      });
-
-      req.callback.completionFinished(Cr.NS_OK);
-    };
-
-    // V4 completion handler
-    let completionV4 = (req) => {
-      let matches = Cc["@mozilla.org/array;1"].createInstance(Ci.nsIMutableArray);
+    for (let i = 0; i < this._requests.length; i++) {
+      let request = this._requests[i];
+      for (let j = 0; j < request.responses.length; j++) {
+        let response = request.responses[j];
+        request.callback.completion(response.completeHash, response.tableName,
+                                    response.chunkId);
+      }
 
-      req.response.matches.forEach(m => {
-        matches.appendElement(
-          new FullHashMatch(m.tableName, m.completeHash, m.cacheDuration), false);
-      });
-
-      req.callback.completionV4(req.partialHash, req.tableName,
-                                req.response.negCacheDuration, matches);
-
-      req.callback.completionFinished(Cr.NS_OK);
-    };
-
-    let completion = this.isV4 ? completionV4 : completionV2;
-    this._requests.forEach((req) => { completion(req); });
+      request.callback.completionFinished(Cr.NS_OK);
+    }
   },
 
   notifyFailure: function HCR_notifyFailure(aStatus) {
     log("notifying failure\n");
     for (let i = 0; i < this._requests.length; i++) {
       let request = this._requests[i];
       request.callback.completionFinished(aStatus);
     }
--- a/toolkit/components/url-classifier/nsUrlClassifierUtils.cpp
+++ b/toolkit/components/url-classifier/nsUrlClassifierUtils.cpp
@@ -454,26 +454,26 @@ nsUrlClassifierUtils::ParseFindFullHashR
   for (auto& m : r.matches()) {
     nsCString tableNames;
     nsresult rv = ConvertThreatTypeToListNames(m.threat_type(), tableNames);
     if (NS_FAILED(rv)) {
       hasUnknownThreatType = true;
       continue; // Ignore an unconvertible threat type.
     }
     auto& hash = m.threat().hash();
-    auto cacheDuration = m.cache_duration().seconds();
+    auto cacheDuration = DurationToMs(m.cache_duration());
     aCallback->OnCompleteHashFound(nsCString(hash.c_str(), hash.length()),
                                    tableNames, cacheDuration);
 
     Telemetry::Accumulate(Telemetry::URLCLASSIFIER_POSITIVE_CACHE_DURATION,
                           cacheDuration);
   }
 
   auto minWaitDuration = DurationToMs(r.minimum_wait_duration());
-  auto negCacheDuration = r.negative_cache_duration().seconds();
+  auto negCacheDuration = DurationToMs(r.negative_cache_duration());
 
   aCallback->OnResponseParsed(minWaitDuration, negCacheDuration);
 
   Telemetry::Accumulate(Telemetry::URLCLASSIFIER_COMPLETION_ERROR,
                         hasUnknownThreatType ? UNKNOWN_THREAT_TYPE : SUCCESS);
 
   Telemetry::Accumulate(Telemetry::URLCLASSIFIER_NEGATIVE_CACHE_DURATION,
                         negCacheDuration);
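
With this backout, both the per-hash cache duration and the negative cache duration handed to the parse callback come from DurationToMs, i.e. they are expressed in milliseconds rather than raw seconds (the TestFindFullHash change further below adjusts its expectations to mSecs * 1000 accordingly). The helper's body is not part of this hunk; the following is only a minimal sketch of such a conversion, assuming a protobuf-style Duration message with seconds and nanos fields (the struct below is a hypothetical stand-in, not the generated safebrowsing.pb.h type):

    #include <cstdint>

    // Hypothetical stand-in for the generated protobuf Duration message;
    // the real type exposes accessors rather than plain fields.
    struct Duration {
      int64_t seconds;
      int32_t nanos;
    };

    // Sketch of a seconds+nanos -> milliseconds conversion, consistent with
    // the values the restored code passes to OnCompleteHashFound and
    // OnResponseParsed.
    static uint32_t DurationToMs(const Duration& aDuration) {
      return static_cast<uint32_t>(aDuration.seconds * 1000 +
                                   aDuration.nanos / 1000000);
    }

Under that assumption, a response advertising a whole-second minimum wait duration of N seconds reaches OnResponseParsed as N * 1000, which is what the gtest below compares against.
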
--- a/toolkit/components/url-classifier/tests/gtest/Common.cpp
+++ b/toolkit/components/url-classifier/tests/gtest/Common.cpp
@@ -5,19 +5,16 @@
 #include "nsTArray.h"
 #include "nsIThread.h"
 #include "nsThreadUtils.h"
 #include "nsUrlClassifierUtils.h"
 
 using namespace mozilla;
 using namespace mozilla::safebrowsing;
 
-#define GTEST_SAFEBROWSING_DIR NS_LITERAL_CSTRING("safebrowsing")
-#define GTEST_TABLE NS_LITERAL_CSTRING("gtest-malware-proto")
-
 template<typename Function>
 void RunTestInNewThread(Function&& aFunction) {
   nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(mozilla::Forward<Function>(aFunction));
   nsCOMPtr<nsIThread> testingThread;
   nsresult rv =
     NS_NewNamedThread("Testing Thread", getter_AddRefs(testingThread), r);
   ASSERT_EQ(rv, NS_OK);
   testingThread->Shutdown();
@@ -155,27 +152,8 @@ GeneratePrefix(const nsCString& aFragmen
   nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
   complete.FromPlaintext(aFragment, cryptoHash);
 
   nsCString hash;
   hash.Assign((const char *)complete.buf, aLength);
   return hash;
 }
 
-UniquePtr<LookupCacheV4>
-SetupLookupCacheV4(const _PrefixArray& prefixArray)
-{
-  nsCOMPtr<nsIFile> file;
-  NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR, getter_AddRefs(file));
-
-  file->AppendNative(GTEST_SAFEBROWSING_DIR);
-
-  UniquePtr<LookupCacheV4> cache = MakeUnique<LookupCacheV4>(GTEST_TABLE, EmptyCString(), file);
-  nsresult rv = cache->Init();
-  EXPECT_EQ(rv, NS_OK);
-
-  PrefixStringMap map;
-  PrefixArrayToPrefixStringMap(prefixArray, map);
-  rv = cache->Build(map);
-  EXPECT_EQ(rv, NS_OK);
-
-  return Move(cache);
-}
--- a/toolkit/components/url-classifier/tests/gtest/Common.h
+++ b/toolkit/components/url-classifier/tests/gtest/Common.h
@@ -1,26 +1,22 @@
 #include "HashStore.h"
-#include "LookupCacheV4.h"
 #include "nsIFile.h"
 #include "nsTArray.h"
 #include "gtest/gtest.h"
 
 using namespace mozilla;
 using namespace mozilla::safebrowsing;
 
 namespace mozilla {
 namespace safebrowsing {
     class Classifier;
 }
 }
 
-typedef nsCString _Fragment;
-typedef nsTArray<nsCString> _PrefixArray;
-
 template<typename Function>
 void RunTestInNewThread(Function&& aFunction);
 
 // Synchronously apply updates by calling Classifier::AsyncApplyUpdates.
 nsresult SyncApplyUpdates(Classifier* aClassifier,
                           nsTArray<TableUpdate*>* aUpdates);
 
 // Return nsIFile with root directory - NS_APP_USER_PROFILE_50_DIR
@@ -38,11 +34,8 @@ void ApplyUpdate(TableUpdate* update);
 void PrefixArrayToPrefixStringMap(const nsTArray<nsCString>& prefixArray,
                                   PrefixStringMap& out);
 
 nsresult PrefixArrayToAddPrefixArrayV2(const nsTArray<nsCString>& prefixArray,
                                        AddPrefixArray& out);
 
 // Generate a hash prefix from string
 nsCString GeneratePrefix(const nsCString& aFragment, uint8_t aLength);
-
-// Create a LookupCacheV4 object with the specified prefix array.
-UniquePtr<LookupCacheV4> SetupLookupCacheV4(const _PrefixArray& prefixArray);
deleted file mode 100644
--- a/toolkit/components/url-classifier/tests/gtest/TestCachingV4.cpp
+++ /dev/null
@@ -1,232 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "Common.h"
-
-#define EXPIRED_TIME_SEC     (PR_Now() / PR_USEC_PER_SEC - 3600)
-#define NOTEXPIRED_TIME_SEC  (PR_Now() / PR_USEC_PER_SEC + 3600)
-
-static void
-SetupCacheEntry(LookupCacheV4* aLookupCache,
-                const nsCString& aCompletion,
-                bool aNegExpired = false,
-                bool aPosExpired = false)
-{
-  FullHashResponseMap map;
-  CachedFullHashResponse* response = map.LookupOrAdd(
-    GeneratePrefix(aCompletion, PREFIX_SIZE));
-
-  response->negativeCacheExpirySec = aNegExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC;
-  response->fullHashes.Put(GeneratePrefix(aCompletion, COMPLETE_SIZE),
-                           aPosExpired ? EXPIRED_TIME_SEC : NOTEXPIRED_TIME_SEC);
-
-  aLookupCache->AddFullHashResponseToCache(map);
-}
-
-void
-TestCache(const Completion aCompletion,
-          bool aExpectedHas,
-          bool aExpectedConfirmed,
-          bool aExpectedFromCache,
-          LookupCacheV4* aCache = nullptr)
-{
-  bool has, fromCache, confirmed;
-  uint32_t matchLength;
-  TableFreshnessMap dummy;
-
-  if (aCache) {
-    aCache->Has(aCompletion, dummy, 0, &has, &matchLength, &confirmed, &fromCache);
-  } else {
-    _PrefixArray array = { GeneratePrefix(_Fragment("cache.notexpired.com/"), 10),
-                           GeneratePrefix(_Fragment("cache.expired.com/"), 8),
-                           GeneratePrefix(_Fragment("gound.com/"), 5),
-                           GeneratePrefix(_Fragment("small.com/"), 4)
-                         };
-
-    UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
-
-    // Create an expired entry and a non-expired entry
-    SetupCacheEntry(cache.get(), _Fragment("cache.notexpired.com/"));
-    SetupCacheEntry(cache.get(), _Fragment("cache.expired.com/"), true, true);
-
-    cache->Has(aCompletion, dummy, 0, &has, &matchLength, &confirmed, &fromCache);
-  }
-
-  EXPECT_EQ(has, aExpectedHas);
-  EXPECT_EQ(confirmed, aExpectedConfirmed);
-  EXPECT_EQ(fromCache, aExpectedFromCache);
-}
-
-void
-TestCache(const _Fragment& aFragment,
-          bool aExpectedHas,
-          bool aExpectedConfirmed,
-          bool aExpectedFromCache,
-          LookupCacheV4* aCache = nullptr)
-{
-  Completion lookupHash;
-  nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
-  lookupHash.FromPlaintext(aFragment, cryptoHash);
-
-  TestCache(lookupHash, aExpectedHas, aExpectedConfirmed, aExpectedFromCache, aCache);
-}
-
-// This testcase checks the returned result of the |Has| API when the fullhash
-// cannot match any prefix in the local database.
-TEST(CachingV4, NotFound)
-{
-  TestCache(_Fragment("nomatch.com/"), false, false, false);
-}
-
-// This testcase checks the returned result of the |Has| API when the fullhash
-// finds a match in the local database but not in the cache.
-TEST(CachingV4, NotInCache)
-{
-  TestCache(_Fragment("gound.com/"), true, false, false);
-}
-
-// This testcase checks the returned result of the |Has| API when the fullhash
-// matches a cache entry in the positive cache.
-TEST(CachingV4, InPositiveCacheNotExpired)
-{
-  TestCache(_Fragment("cache.notexpired.com/"), true, true, true);
-}
-
-// This testcase checks the returned result of the |Has| API when the fullhash
-// matches a cache entry in the positive cache but that entry is expired.
-TEST(CachingV4, InPositiveCacheExpired)
-{
-  TestCache(_Fragment("cache.expired.com/"), true, false, true);
-}
-
-// This testcase checks the returned result of the |Has| API when the fullhash
-// matches a cache entry in the negative cache.
-TEST(CachingV4, InNegativeCacheNotExpired)
-{
-  // Create a fullhash whose prefix matches the prefix in negative cache
-  // but completion doesn't match any fullhash in positive cache.
-  nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
-
-  Completion prefix;
-  prefix.FromPlaintext(_Fragment("cache.notexpired.com/"), cryptoHash);
-
-  Completion fullhash;
-  fullhash.FromPlaintext(_Fragment("firefox.com/"), cryptoHash);
-
-  // Overwrite the 4-byte prefix of `fullhash` so that it conflicts with `prefix`.
-  // Since "cache.notexpired.com" is added to database in TestCache as a
-  // 10-byte prefix, we should copy more than 10 bytes to fullhash to ensure
-  // it can match the prefix in database.
-  memcpy(fullhash.buf, prefix.buf, 10);
-
-  TestCache(fullhash, false, false, true);
-}
-
-// This testcase checks the returned result of the |Has| API when the fullhash
-// matches a cache entry in the negative cache but that entry is expired.
-TEST(CachingV4, InNegativeCacheExpired)
-{
-  // Create a fullhash whose prefix is in the cache.
-  nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
-
-  Completion prefix;
-  prefix.FromPlaintext(_Fragment("cache.expired.com/"), cryptoHash);
-
-  Completion fullhash;
-  fullhash.FromPlaintext(_Fragment("firefox.com/"), cryptoHash);
-
-  memcpy(fullhash.buf, prefix.buf, 10);
-
-  TestCache(fullhash, true, false, true);
-}
-
-#define CACHED_URL              _Fragment("cache.com/")
-#define NEG_CACHE_EXPIRED_URL   _Fragment("cache.negExpired.com/")
-#define POS_CACHE_EXPIRED_URL   _Fragment("cache.posExpired.com/")
-#define BOTH_CACHE_EXPIRED_URL  _Fragment("cache.negAndposExpired.com/")
-
-// This testcase creates 4 cache entries:
-// 1. an unexpired entry.
-// 2. an entry whose negative cache time is expired but whose positive cache
-//    time is not expired.
-// 3. an entry whose positive cache time is expired.
-// 4. an entry whose negative cache time and positive cache time are expired.
-// After calling the |InvalidateExpiredCacheEntry| API, entries with an expired
-// negative cache time should be removed from the cache (2 & 4).
-TEST(CachingV4, InvalidateExpiredCacheEntry)
-{
-  _PrefixArray array = { GeneratePrefix(CACHED_URL, 10),
-                         GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8),
-                         GeneratePrefix(POS_CACHE_EXPIRED_URL, 5),
-                         GeneratePrefix(BOTH_CACHE_EXPIRED_URL, 4)
-                       };
-
-  UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
-
-  SetupCacheEntry(cache.get(), CACHED_URL, false, false);
-  SetupCacheEntry(cache.get(), NEG_CACHE_EXPIRED_URL, true, false);
-  SetupCacheEntry(cache.get(), POS_CACHE_EXPIRED_URL, false, true);
-  SetupCacheEntry(cache.get(), BOTH_CACHE_EXPIRED_URL, true, true);
-
-  // Before invalidate
-  TestCache(CACHED_URL, true, true, true, cache.get());
-  TestCache(NEG_CACHE_EXPIRED_URL, true, true, true, cache.get());
-  TestCache(POS_CACHE_EXPIRED_URL, true, false, true, cache.get());
-  TestCache(BOTH_CACHE_EXPIRED_URL, true, false, true, cache.get());
-
-  // Call InvalidateExpiredCacheEntry to remove cache entries whose negative cache
-  // time is expired
-  cache->InvalidateExpiredCacheEntry();
-
-  // After invalidate, NEG_CACHE_EXPIRED_URL & BOTH_CACHE_EXPIRED_URL should
-  // not be found in cache.
-  TestCache(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
-  TestCache(BOTH_CACHE_EXPIRED_URL, true, false, false, cache.get());
-
-  // Other entries should still return the same result.
-  TestCache(CACHED_URL, true, true, true, cache.get());
-  TestCache(POS_CACHE_EXPIRED_URL, true, false, true, cache.get());
-}
-
-// This testcase checks that a cache entry whose negative cache time is expired
-// and which doesn't have any positive cache entries in it is removed
-// from the cache after calling |Has|.
-TEST(CachingV4, NegativeCacheExpire)
-{
-  _PrefixArray array = { GeneratePrefix(NEG_CACHE_EXPIRED_URL, 8) };
-  UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
-
-  FullHashResponseMap map;
-  CachedFullHashResponse* response = map.LookupOrAdd(
-    GeneratePrefix(NEG_CACHE_EXPIRED_URL, PREFIX_SIZE));
-
-  response->negativeCacheExpirySec = EXPIRED_TIME_SEC;
-
-  cache->AddFullHashResponseToCache(map);
-
-  // The first time, we should find it in the cache but the result is not
-  // confirmed (because it is expired).
-  TestCache(NEG_CACHE_EXPIRED_URL, true, false, true, cache.get());
-
-  // The second time, it should not be found in the cache again.
-  TestCache(NEG_CACHE_EXPIRED_URL, true, false, false, cache.get());
-}
-
-// This testcase checks that we only look up the cache with a 4-byte prefix.
-TEST(CachingV4, Ensure4BytesLookup)
-{
-  _PrefixArray array = { GeneratePrefix(CACHED_URL, 8) };
-  UniquePtr<LookupCacheV4> cache = SetupLookupCacheV4(array);
-
-  FullHashResponseMap map;
-  CachedFullHashResponse* response = map.LookupOrAdd(
-    GeneratePrefix(CACHED_URL, 5));
-
-  response->negativeCacheExpirySec = NOTEXPIRED_TIME_SEC;
-  response->fullHashes.Put(GeneratePrefix(CACHED_URL, COMPLETE_SIZE),
-                                          NOTEXPIRED_TIME_SEC);
-  cache->AddFullHashResponseToCache(map);
-
-  TestCache(CACHED_URL, true, false, false, cache.get());
-}
--- a/toolkit/components/url-classifier/tests/gtest/TestFindFullHash.cpp
+++ b/toolkit/components/url-classifier/tests/gtest/TestFindFullHash.cpp
@@ -154,17 +154,17 @@ public:
 
     return NS_OK;
   }
 
   NS_IMETHOD
   OnResponseParsed(uint32_t aMinWaitDuration,
                    uint32_t aNegCacheDuration) override
   {
-    VerifyDuration(aMinWaitDuration / 1000, EXPECTED_MIN_WAIT_DURATION);
+    VerifyDuration(aMinWaitDuration, EXPECTED_MIN_WAIT_DURATION);
     VerifyDuration(aNegCacheDuration, EXPECTED_NEG_CACHE_DURATION);
 
     return NS_OK;
   }
 
 private:
   void
   Verify(const nsACString& aCompleteHash,
@@ -186,17 +186,17 @@ private:
     VerifyDuration(aPerHashCacheDuration, expected.mPerHashCacheDuration);
 
     mCallbackCount++;
   }
 
   void
   VerifyDuration(uint32_t aToVerify, const MyDuration& aExpected)
   {
-    ASSERT_TRUE(aToVerify == aExpected.mSecs);
+    ASSERT_TRUE(aToVerify == (aExpected.mSecs * 1000));
   }
 
   ~MyParseCallback() {}
 
   uint32_t& mCallbackCount;
 };
 
 NS_IMPL_ISUPPORTS(MyParseCallback, nsIUrlClassifierParseFindFullHashCallback)
--- a/toolkit/components/url-classifier/tests/gtest/TestLookupCacheV4.cpp
+++ b/toolkit/components/url-classifier/tests/gtest/TestLookupCacheV4.cpp
@@ -1,42 +1,64 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+#include "LookupCacheV4.h"
 #include "Common.h"
 
+#define GTEST_SAFEBROWSING_DIR NS_LITERAL_CSTRING("safebrowsing")
+#define GTEST_TABLE NS_LITERAL_CSTRING("gtest-malware-proto")
+
+typedef nsCString _Fragment;
+typedef nsTArray<nsCString> _PrefixArray;
+
+static UniquePtr<LookupCacheV4>
+SetupLookupCacheV4(const _PrefixArray& prefixArray)
+{
+  nsCOMPtr<nsIFile> file;
+  NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR, getter_AddRefs(file));
+
+  file->AppendNative(GTEST_SAFEBROWSING_DIR);
+
+  UniquePtr<LookupCacheV4> cache = MakeUnique<LookupCacheV4>(GTEST_TABLE, EmptyCString(), file);
+  nsresult rv = cache->Init();
+  EXPECT_EQ(rv, NS_OK);
+
+  PrefixStringMap map;
+  PrefixArrayToPrefixStringMap(prefixArray, map);
+  rv = cache->Build(map);
+  EXPECT_EQ(rv, NS_OK);
+
+  return Move(cache);
+}
+
 void
 TestHasPrefix(const _Fragment& aFragment, bool aExpectedHas, bool aExpectedComplete)
 {
   _PrefixArray array = { GeneratePrefix(_Fragment("bravo.com/"), 32),
                          GeneratePrefix(_Fragment("browsing.com/"), 8),
                          GeneratePrefix(_Fragment("gound.com/"), 5),
                          GeneratePrefix(_Fragment("small.com/"), 4)
                        };
 
   RunTestInNewThread([&] () -> void {
     UniquePtr<LookupCache> cache = SetupLookupCacheV4(array);
 
     Completion lookupHash;
     nsCOMPtr<nsICryptoHash> cryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID);
     lookupHash.FromPlaintext(aFragment, cryptoHash);
 
-    bool has, confirmed, fromCache;
+    bool has, fromCache;
     uint32_t matchLength;
-    // Freshness is not used in V4 so we just put dummy values here.
-    TableFreshnessMap dummy;
-    nsresult rv = cache->Has(lookupHash, dummy, 0,
-                             &has, &matchLength, &confirmed, &fromCache);
+    nsresult rv = cache->Has(lookupHash, &has, &matchLength, &fromCache);
 
     EXPECT_EQ(rv, NS_OK);
     EXPECT_EQ(has, aExpectedHas);
     EXPECT_EQ(matchLength == COMPLETE_SIZE, aExpectedComplete);
-    EXPECT_EQ(confirmed, false);
-    EXPECT_EQ(fromCache, false);
 
     cache->ClearAll();
   });
 
 }
 
 TEST(LookupCacheV4, HasComplete)
 {
--- a/toolkit/components/url-classifier/tests/gtest/moz.build
+++ b/toolkit/components/url-classifier/tests/gtest/moz.build
@@ -5,17 +5,16 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 LOCAL_INCLUDES += [
     '../..',
 ]
 
 UNIFIED_SOURCES += [
     'Common.cpp',
-    'TestCachingV4.cpp',
     'TestChunkSet.cpp',
     'TestClassifier.cpp',
     'TestFailUpdate.cpp',
     'TestFindFullHash.cpp',
     'TestLookupCacheV4.cpp',
     'TestPerProviderDirectory.cpp',
     'TestProtocolParser.cpp',
     'TestRiceDeltaDecoder.cpp',
--- a/toolkit/components/url-classifier/tests/unit/test_hashcompleter.js
+++ b/toolkit/components/url-classifier/tests/unit/test_hashcompleter.js
@@ -350,17 +350,17 @@ function hashCompleterServer(aRequest, a
 }
 
 
 function callback(completion) {
   this._completion = completion;
 }
 
 callback.prototype = {
-  completionV2: function completion(hash, table, chunkId, trusted) {
+  completion: function completion(hash, table, chunkId, trusted) {
     do_check_true(this._completion.expectCompletion);
     if (this._completion.multipleCompletions) {
       for (let completion of this._completion.completions) {
         if (completion.hash == hash) {
           do_check_eq(JSON.stringify(hash), JSON.stringify(completion.hash));
           do_check_eq(table, completion.table);
           do_check_eq(chunkId, completion.chunkId);
 
--- a/toolkit/components/url-classifier/tests/unit/test_hashcompleter_v4.js
+++ b/toolkit/components/url-classifier/tests/unit/test_hashcompleter_v4.js
@@ -85,69 +85,52 @@ add_test(function test_getHashRequestV4(
                                                    [btoa(NEW_CLIENT_STATE)],
                                                    [btoa("0123"), btoa("1234567"), btoa("1111")],
                                                    1,
                                                    3);
   registerHandlerGethashV4("&$req=" + request);
   let completeFinishedCnt = 0;
 
   gCompleter.complete("0123", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
-    completionV4(hash, table, duration, fullhashes) {
-      equal(hash, "0123");
+    completion(hash, table, chunkId) {
+      equal(hash, "01234567890123456789012345678901");
       equal(table, TEST_TABLE_DATA_V4.tableName);
-      equal(duration, 120);
-      equal(fullhashes.length, 1);
-
-      let match = fullhashes.QueryInterface(Ci.nsIArray)
-                  .queryElementAt(0, Ci.nsIFullHashMatch);
-
-      equal(match.fullHash, "01234567890123456789012345678901");
-      equal(match.cacheDuration, 8)
-      do_print("completion: " + match.fullHash + ", " + table);
+      equal(chunkId, 0);
+      do_print("completion: " + hash + ", " + table + ", " + chunkId);
     },
 
     completionFinished(status) {
       equal(status, Cr.NS_OK);
       completeFinishedCnt++;
       if (3 === completeFinishedCnt) {
         run_next_test();
       }
     },
   });
 
   gCompleter.complete("1234567", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
-    completionV4(hash, table, duration, fullhashes) {
-      equal(hash, "1234567");
+    completion(hash, table, chunkId) {
+      equal(hash, "12345678901234567890123456789012");
       equal(table, TEST_TABLE_DATA_V4.tableName);
-      equal(duration, 120);
-      equal(fullhashes.length, 1);
-
-      let match = fullhashes.QueryInterface(Ci.nsIArray)
-                  .queryElementAt(0, Ci.nsIFullHashMatch);
-
-      equal(match.fullHash, "12345678901234567890123456789012");
-      equal(match.cacheDuration, 7)
-      do_print("completion: " + match.fullHash + ", " + table);
+      equal(chunkId, 0);
+      do_print("completion: " + hash + ", " + table + ", " + chunkId);
     },
 
     completionFinished(status) {
       equal(status, Cr.NS_OK);
       completeFinishedCnt++;
       if (3 === completeFinishedCnt) {
         run_next_test();
       }
     },
   });
 
   gCompleter.complete("1111", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
-    completionV4(hash, table, duration, fullhashes) {
-      equal(hash, "1111");
-      equal(table, TEST_TABLE_DATA_V4.tableName);
-      equal(duration, 120);
-      equal(fullhashes.length, 0);
+    completion(hash, table, chunkId) {
+      ok(false, "1111 is not the prefix of " + hash);
     },
 
     completionFinished(status) {
       equal(status, Cr.NS_OK);
       completeFinishedCnt++;
       if (3 === completeFinishedCnt) {
         run_next_test();
       }
@@ -161,27 +144,21 @@ add_test(function test_minWaitDuration()
       completionFinished(status) {
         equal(status, Cr.NS_ERROR_ABORT);
       },
     });
   };
 
   let successComplete = function() {
     gCompleter.complete("1234567", TEST_TABLE_DATA_V4.gethashUrl, TEST_TABLE_DATA_V4.tableName, {
-      completionV4(hash, table, duration, fullhashes) {
-        equal(hash, "1234567");
+      completion(hash, table, chunkId) {
+        equal(hash, "12345678901234567890123456789012");
         equal(table, TEST_TABLE_DATA_V4.tableName);
-        equal(fullhashes.length, 1);
-
-        let match = fullhashes.QueryInterface(Ci.nsIArray)
-                    .queryElementAt(0, Ci.nsIFullHashMatch);
-
-        equal(match.fullHash, "12345678901234567890123456789012");
-        equal(match.cacheDuration, 7)
-        do_print("completion: " + match.fullHash + ", " + table);
+        equal(chunkId, 0);
+        do_print("completion: " + hash + ", " + table + ", " + chunkId);
       },
 
       completionFinished(status) {
         equal(status, Cr.NS_OK);
         run_next_test();
       },
     });
   };
--- a/toolkit/components/url-classifier/tests/unit/test_partial.js
+++ b/toolkit/components/url-classifier/tests/unit/test_partial.js
@@ -30,17 +30,17 @@ complete: function(partialHash, gethashU
         cb.completionFinished(Cr.NS_ERROR_FAILURE);
         return;
       }
       var results;
       if (fragments[partialHash]) {
         for (var i = 0; i < fragments[partialHash].length; i++) {
           var chunkId = fragments[partialHash][i][0];
           var hash = fragments[partialHash][i][1];
-          cb.completionV2(hash, self.tableName, chunkId);
+          cb.completion(hash, self.tableName, chunkId);
         }
       }
     cb.completionFinished(0);
   }
   var timer = new Timer(0, doCallback);
 },
 
 getHash: function(fragment)