Bug 1637169 - Vendor new application-services. r=rfkelly
author: Mark Hammond <mhammond@skippinet.com.au>
Tue, 12 May 2020 07:53:39 +0000
changeset 529293 03b338d6730a4ebf812ef9b6a770d16a15ec5253
parent 529292 6b3c26acd625e3e28d21db7a923c32156046d1f0
child 529294 fe1593d02cbfb20d96eb668b117632ff186b867f
push id: 37409
push user: apavel@mozilla.com
push date: Wed, 13 May 2020 03:44:05 +0000
treeherder: mozilla-central@59d4ec4ce296
reviewers: rfkelly
bugs: 1637169
milestone: 78.0a1
Bug 1637169 - Vendor new application-services. r=rfkelly

Differential Revision: https://phabricator.services.mozilla.com/D74817
.cargo/config.in
Cargo.lock
services/sync/golden_gate/Cargo.toml
third_party/rust/error-support/.cargo-checksum.json
third_party/rust/error-support/Cargo.toml
third_party/rust/sync-guid/.cargo-checksum.json
third_party/rust/sync-guid/Cargo.toml
third_party/rust/sync15-traits/.cargo-checksum.json
third_party/rust/sync15-traits/Cargo.toml
third_party/rust/webext-storage/.cargo-checksum.json
third_party/rust/webext-storage/Cargo.toml
third_party/rust/webext-storage/src/api.rs
third_party/rust/webext-storage/src/db.rs
third_party/rust/webext-storage/src/error.rs
third_party/rust/webext-storage/src/lib.rs
third_party/rust/webext-storage/src/store.rs
third_party/rust/webext-storage/src/sync/bridge.rs
third_party/rust/webext-storage/src/sync/incoming.rs
third_party/rust/webext-storage/src/sync/mod.rs
third_party/rust/webext-storage/src/sync/sync_tests.rs
toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml
--- a/.cargo/config.in
+++ b/.cargo/config.in
@@ -20,17 +20,17 @@ tag = "v0.2.4"
 [source."https://github.com/mozilla/mp4parse-rust"]
 git = "https://github.com/mozilla/mp4parse-rust"
 replace-with = "vendored-sources"
 rev = "0dc3e6e7c5371fe21f69b847f61c65fe6d6dc317"
 
 [source."https://github.com/mozilla/application-services"]
 git = "https://github.com/mozilla/application-services"
 replace-with = "vendored-sources"
-rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8"
+rev = "7352f64601cdbf39d28831d30d246f7340ef31b3"
 
 [source."https://github.com/mozilla-spidermonkey/jsparagus"]
 git = "https://github.com/mozilla-spidermonkey/jsparagus"
 replace-with = "vendored-sources"
 rev = "9871d65530f5fc0e3d953efbf40b51e9c56c6d2a"
 
 [source."https://github.com/kvark/spirv_cross"]
 branch = "wgpu2"
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1250,17 +1250,17 @@ dependencies = [
 name = "error-chain"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3"
 
 [[package]]
 name = "error-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
+source = "git+https://github.com/mozilla/application-services?rev=7352f64601cdbf39d28831d30d246f7340ef31b3#7352f64601cdbf39d28831d30d246f7340ef31b3"
 dependencies = [
  "failure",
 ]
 
 [[package]]
 name = "euclid"
 version = "0.20.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2196,17 +2196,17 @@ source = "registry+https://github.com/ru
 checksum = "1cdb29978cc5797bd8dcc8e5bf7de604891df2a8dc576973d71a281e916db2ff"
 dependencies = [
  "adler32",
 ]
 
 [[package]]
 name = "interrupt-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
+source = "git+https://github.com/mozilla/application-services?rev=7352f64601cdbf39d28831d30d246f7340ef31b3#7352f64601cdbf39d28831d30d246f7340ef31b3"
 
 [[package]]
 name = "intl-memoizer"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9867e2d65d82936ef34217ed0f87b639a94384e93a0676158142c861c705391f"
 dependencies = [
  "type-map",
@@ -3127,17 +3127,17 @@ name = "nserror"
 version = "0.1.0"
 dependencies = [
  "nsstring",
 ]
 
 [[package]]
 name = "nss_build_common"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
+source = "git+https://github.com/mozilla/application-services?rev=7352f64601cdbf39d28831d30d246f7340ef31b3#7352f64601cdbf39d28831d30d246f7340ef31b3"
 
 [[package]]
 name = "nsstring"
 version = "0.1.0"
 dependencies = [
  "bitflags",
  "encoding_rs",
 ]
@@ -4271,17 +4271,17 @@ version = "0.18.0"
 source = "git+https://github.com/kvark/spirv_cross?branch=wgpu2#f252537beb139a818b4a8c854e501690c9ade20e"
 dependencies = [
  "spirv-cross-internal",
 ]
 
 [[package]]
 name = "sql-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
+source = "git+https://github.com/mozilla/application-services?rev=7352f64601cdbf39d28831d30d246f7340ef31b3#7352f64601cdbf39d28831d30d246f7340ef31b3"
 dependencies = [
  "ffi-support",
  "interrupt-support",
  "lazy_static",
  "log",
  "rusqlite",
 ]
 
@@ -4468,28 +4468,28 @@ dependencies = [
  "proc-macro2",
  "quote",
  "unicode-xid",
 ]
 
 [[package]]
 name = "sync-guid"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
+source = "git+https://github.com/mozilla/application-services?rev=7352f64601cdbf39d28831d30d246f7340ef31b3#7352f64601cdbf39d28831d30d246f7340ef31b3"
 dependencies = [
  "base64 0.12.0",
  "rand",
  "rusqlite",
  "serde",
 ]
 
 [[package]]
 name = "sync15-traits"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
+source = "git+https://github.com/mozilla/application-services?rev=7352f64601cdbf39d28831d30d246f7340ef31b3#7352f64601cdbf39d28831d30d246f7340ef31b3"
 dependencies = [
  "failure",
  "ffi-support",
  "interrupt-support",
  "log",
  "serde",
  "serde_json",
  "sync-guid",
@@ -5194,17 +5194,17 @@ dependencies = [
  "unicode-segmentation",
  "url",
  "warp",
 ]
 
 [[package]]
 name = "webext-storage"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
+source = "git+https://github.com/mozilla/application-services?rev=7352f64601cdbf39d28831d30d246f7340ef31b3#7352f64601cdbf39d28831d30d246f7340ef31b3"
 dependencies = [
  "error-support",
  "failure",
  "interrupt-support",
  "lazy_static",
  "log",
  "nss_build_common",
  "rusqlite",
--- a/services/sync/golden_gate/Cargo.toml
+++ b/services/sync/golden_gate/Cargo.toml
@@ -3,21 +3,21 @@ name = "golden_gate"
 description = "A bridge for wiring up Sync engines implemented in Rust"
 version = "0.1.0"
 authors = ["The Firefox Sync Developers <sync-team@mozilla.com>"]
 edition = "2018"
 
 [dependencies]
 atomic_refcell = "0.1"
 cstr = "0.1"
-interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8" }
+interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "7352f64601cdbf39d28831d30d246f7340ef31b3" }
 log = "0.4"
 moz_task = { path = "../../../xpcom/rust/moz_task" }
 nserror = { path = "../../../xpcom/rust/nserror" }
 nsstring = { path = "../../../xpcom/rust/nsstring" }
 serde_json = "1"
 storage_variant = { path = "../../../storage/variant" }
-sync15-traits = { git = "https://github.com/mozilla/application-services", rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8" }
+sync15-traits = { git = "https://github.com/mozilla/application-services", rev = "7352f64601cdbf39d28831d30d246f7340ef31b3" }
 xpcom = { path = "../../../xpcom/rust/xpcom" }
 
 [dependencies.thin-vec]
 version = "0.1.0"
 features = ["gecko-ffi"]
--- a/third_party/rust/error-support/.cargo-checksum.json
+++ b/third_party/rust/error-support/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"9ba6f30454cfbe5cc844824a89f31b65d607df6aec569d093eb6307d902c5159","src/lib.rs":"4581b12eb58f9fb5275c7af74fbc4521b82ef224b6ba81f0e785c372ba95f8c6"},"package":null}
\ No newline at end of file
+{"files":{"Cargo.toml":"21b095cc85324ada8cf714deb719e3e892f2f5222538e063db56fde0f81bd17c","src/lib.rs":"4581b12eb58f9fb5275c7af74fbc4521b82ef224b6ba81f0e785c372ba95f8c6"},"package":null}
\ No newline at end of file
--- a/third_party/rust/error-support/Cargo.toml
+++ b/third_party/rust/error-support/Cargo.toml
@@ -1,10 +1,10 @@
 [package]
 name = "error-support"
 version = "0.1.0"
 authors = ["Thom Chiovoloni <tchiovoloni@mozilla.com>"]
 edition = "2018"
 license = "MPL-2.0"
 
 [dependencies]
-failure = "0.1.6"
+failure = "0.1"
 
--- a/third_party/rust/sync-guid/.cargo-checksum.json
+++ b/third_party/rust/sync-guid/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"fec1d023581c5e34b5669c1b42efd11819eba4c3c29eca1f6095f6044a1fa5ae","src/lib.rs":"729e562be4e63ec7db2adc00753a019ae77c11ce82637a893ea18122580c3c98","src/rusqlite_support.rs":"827d314605d8c741efdf238a0780a891c88bc56026a3e6dcfa534772a4852fb3","src/serde_support.rs":"519b5eb59ca7be555d522f2186909db969069dc9586a5fe4047d4ec176b2368a"},"package":null}
\ No newline at end of file
+{"files":{"Cargo.toml":"206fef066f785b22aa1239362d1340b966807af10d43e611b99dad72194b23b3","src/lib.rs":"729e562be4e63ec7db2adc00753a019ae77c11ce82637a893ea18122580c3c98","src/rusqlite_support.rs":"827d314605d8c741efdf238a0780a891c88bc56026a3e6dcfa534772a4852fb3","src/serde_support.rs":"519b5eb59ca7be555d522f2186909db969069dc9586a5fe4047d4ec176b2368a"},"package":null}
\ No newline at end of file
--- a/third_party/rust/sync-guid/Cargo.toml
+++ b/third_party/rust/sync-guid/Cargo.toml
@@ -2,21 +2,21 @@
 name = "sync-guid"
 version = "0.1.0"
 authors = ["Thom Chiovoloni <tchiovoloni@mozilla.com>"]
 license = "MPL-2.0"
 edition = "2018"
 
 [dependencies]
 rusqlite = { version = "0.23.1", optional = true }
-serde = { version = "1.0.104", optional = true }
+serde = { version = "1", optional = true }
 rand = { version = "0.7", optional = true }
 base64 = { version = "0.12.0", optional = true }
 
 [features]
 random = ["rand", "base64"]
 rusqlite_support = ["rusqlite"]
 serde_support = ["serde"]
 # By default we support serde, but not rusqlite.
 default = ["serde_support"]
 
 [dev-dependencies]
-serde_test = "1.0.104"
+serde_test = "1"
--- a/third_party/rust/sync15-traits/.cargo-checksum.json
+++ b/third_party/rust/sync15-traits/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"656c4c4af39bcf924098be33996360250f9610ee3a4090b8152b68bdad03c46e","README.md":"396105211d8ce7f40b05d8062d7ab55d99674555f3ac81c061874ae26656ed7e","src/bridged_engine.rs":"dffaea14d677bae1d95305b90b809cdb0b64e4fa889f1e3c3f4b5d85609991d6","src/changeset.rs":"442aa92b5130ec0f8f2b0054acb399c547380e0060015cbf4ca7a72027440d54","src/client.rs":"6be4f550ade823fafc350c5490e031f90a4af833a9bba9739b05568464255a74","src/lib.rs":"a64802fb56b1fd066c4cfdf18874347e80fc9ef4a1975bdbbd76541b0fa1744c","src/payload.rs":"09db1a444e7893990a4f03cb16263b9c15abc9e48ec4f1343227be1b490865a5","src/request.rs":"9e656ec487e53c7485643687e605d73bb25e138056e920d6f4b7d63fc6a8c460","src/server_timestamp.rs":"43d1b98a90e55e49380a0b66c209c9eb393e2aeaa27d843a4726d93cdd4cea02","src/store.rs":"10e215dd24270b6bec10903ac1d5274ce997eb437134f43be7de44e36fb9d1e4","src/telemetry.rs":"027befb099a6fcded3457f7e566296548a0898ff613267190621856b9ef288f6"},"package":null}
\ No newline at end of file
+{"files":{"Cargo.toml":"317a24dd2667266ebed53a3a6b65b0c0840d0f96ce362a543af154ab862b0ce6","README.md":"396105211d8ce7f40b05d8062d7ab55d99674555f3ac81c061874ae26656ed7e","src/bridged_engine.rs":"dffaea14d677bae1d95305b90b809cdb0b64e4fa889f1e3c3f4b5d85609991d6","src/changeset.rs":"442aa92b5130ec0f8f2b0054acb399c547380e0060015cbf4ca7a72027440d54","src/client.rs":"6be4f550ade823fafc350c5490e031f90a4af833a9bba9739b05568464255a74","src/lib.rs":"a64802fb56b1fd066c4cfdf18874347e80fc9ef4a1975bdbbd76541b0fa1744c","src/payload.rs":"09db1a444e7893990a4f03cb16263b9c15abc9e48ec4f1343227be1b490865a5","src/request.rs":"9e656ec487e53c7485643687e605d73bb25e138056e920d6f4b7d63fc6a8c460","src/server_timestamp.rs":"43d1b98a90e55e49380a0b66c209c9eb393e2aeaa27d843a4726d93cdd4cea02","src/store.rs":"10e215dd24270b6bec10903ac1d5274ce997eb437134f43be7de44e36fb9d1e4","src/telemetry.rs":"027befb099a6fcded3457f7e566296548a0898ff613267190621856b9ef288f6"},"package":null}
\ No newline at end of file
--- a/third_party/rust/sync15-traits/Cargo.toml
+++ b/third_party/rust/sync15-traits/Cargo.toml
@@ -5,16 +5,16 @@ authors = ["Thom Chiovoloni <tchiovoloni
 license = "MPL-2.0"
 edition = "2018"
 
 [features]
 random-guid = ["sync-guid/random"]
 
 [dependencies]
 sync-guid = { path = "../guid" }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
 log = "0.4"
 ffi-support = "0.4"
 url = "2.1"
-failure = "0.1.6"
+failure = "0.1"
 
 interrupt-support = { path = "../interrupt" }
--- a/third_party/rust/webext-storage/.cargo-checksum.json
+++ b/third_party/rust/webext-storage/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"0ff7bf4f2ec3250d226f5ca234925fb3d292ef2d3377d89e7dda548789d6b02a","README.md":"1fd617294339930ee1ad5172377648b268cce0216fc3971facbfe7c6839e9ab1","build.rs":"2b827a62155a3d724cdb4c198270ea467439e537403f82fa873321ac55a69a63","sql/create_schema.sql":"cbb6d432e578c69614199f9e82f8103da5c1f6df5d7af4f77ea1be5869000b26","sql/create_sync_temp_tables.sql":"3e7f113899745e1d2af162a520300fc74b1b32202f69f928353854bc1f7b1b8e","src/api.rs":"56d3b1ec95723bfce295728d4dd84cc7712aadb52787a8264871cadd0849a04a","src/db.rs":"3639a0cb8310fb9ff12fcfa251b4f12d39ab69730c77b35f5483fa88747d63ed","src/error.rs":"67d9f32a58cc9232a49a3fbc43da01526eca50deffe7f9ec62f3c2667bb0baab","src/lib.rs":"01e5cc7f4a235409cc893c0275e544289f82d1eca3d93b0e81f523bbe78789f6","src/schema.rs":"cd5a03c2d2dc1eebdea30054c6f6a7a6b302184e9ad1f40de659f6b972c481cf","src/store.rs":"a000751ed6eafcaa87fcf44f6391f2c417fb29917b79ec2812524132c24092a8","src/sync/bridge.rs":"4f7037ab7ae6ad042933056d40f0bbc3ceb988b7dc5b0c43527ec516eb9d3550","src/sync/incoming.rs":"6e70749577b4e037b8a7baa755b8973695ca5926062c0567de8c3585b95a7b66","src/sync/mod.rs":"e2941761b0b20c3ebc6e2b6a7992eeffbab7e1bc49e113b8bd75f304f00f3878","src/sync/outgoing.rs":"2e0434359ba5005d730aebdac2e29980005f56e62003d89e7def78fcf8d13c5a","src/sync/sync_tests.rs":"7fb335ec1a2288529247761df075ccc51280653c0ca1a655115cd1a09eaea540"},"package":null}
\ No newline at end of file
+{"files":{"Cargo.toml":"d5782ce7188018b6e8fa8a99298472d403b6f11d9b7c67b0fd28acbcbdf37109","README.md":"1fd617294339930ee1ad5172377648b268cce0216fc3971facbfe7c6839e9ab1","build.rs":"2b827a62155a3d724cdb4c198270ea467439e537403f82fa873321ac55a69a63","sql/create_schema.sql":"cbb6d432e578c69614199f9e82f8103da5c1f6df5d7af4f77ea1be5869000b26","sql/create_sync_temp_tables.sql":"3e7f113899745e1d2af162a520300fc74b1b32202f69f928353854bc1f7b1b8e","src/api.rs":"c91c8e1fb01e08df8873e51c93842206af6ff839c46c9f5255be17d49a3fdef2","src/db.rs":"e04f19ab2e3da4423ce49d43b6c9a6d86be4e573f54421061bea04ef875afb2a","src/error.rs":"c956152633ad6c787f8b7322b619f807354d4c3cb2ecc35c549c3b4bcd98079e","src/lib.rs":"62aa85ab62d91caa07d58987260f52329c7ab793224e8a1d30e07ce446263d48","src/schema.rs":"cd5a03c2d2dc1eebdea30054c6f6a7a6b302184e9ad1f40de659f6b972c481cf","src/store.rs":"035cca3ebf7311cd9c08b8027fd10c113159c86300eb4a8e1fc8434dd56afd3b","src/sync/bridge.rs":"e60fec0f8f167f893bb2055f4623c9cc4dc6921acafd13dcc4e6cfd228b3cb42","src/sync/incoming.rs":"76e494dbe0583bdc3cb9567cbfd202072e80304dded75da9b57c7662e489bc2e","src/sync/mod.rs":"c9ef7561b3ba898e5f5036efc15bcbc95a2975cabbf82ef1d1623349f4897ce7","src/sync/outgoing.rs":"2e0434359ba5005d730aebdac2e29980005f56e62003d89e7def78fcf8d13c5a","src/sync/sync_tests.rs":"e2c665046f3ad2a665eee2b5b33a89ae8804af42bf93fe7b2694280eb5b2a9cc"},"package":null}
\ No newline at end of file
--- a/third_party/rust/webext-storage/Cargo.toml
+++ b/third_party/rust/webext-storage/Cargo.toml
@@ -6,34 +6,34 @@ authors = ["sync-team@mozilla.com"]
 license = "MPL-2.0"
 
 [features]
 log_query_plans = ["sql-support/log_query_plans"]
 default = []
 
 [dependencies]
 error-support = { path = "../support/error" }
-failure = "0.1.6"
+failure = "0.1"
 interrupt-support = { path = "../support/interrupt" }
 lazy_static = "1.4.0"
 log = "0.4"
 serde = "1"
 serde_json = "1"
 serde_derive = "1"
 sql-support = { path = "../support/sql" }
 sync15-traits = {path = "../support/sync15-traits"}
 sync-guid = { path = "../support/guid", features = ["rusqlite_support", "random"] }
 url = { version = "2.1", features = ["serde"] }
 
 [dependencies.rusqlite]
 version = "0.23.1"
 features = ["functions", "bundled", "serde_json"]
 
 [dev-dependencies]
-env_logger = "0.7.0"
+env_logger = "0.7"
 prettytable-rs = "0.8"
 
 # A *direct* dep on the -sys crate is required for our build.rs
 # to see the DEP_SQLITE3_LINK_TARGET env var that cargo sets
 # on its behalf.
 libsqlite3-sys = "0.18.0"
 
 [build-dependencies]
--- a/third_party/rust/webext-storage/src/api.rs
+++ b/third_party/rust/webext-storage/src/api.rs
@@ -4,20 +4,25 @@
 
 use crate::error::*;
 use rusqlite::{Connection, Transaction};
 use serde::{ser::SerializeMap, Serialize, Serializer};
 
 use serde_json::{Map, Value as JsonValue};
 use sql_support::{self, ConnExt};
 
-// These constants are defined by the chrome.storage.sync spec.
-const QUOTA_BYTES: usize = 102_400;
-const QUOTA_BYTES_PER_ITEM: usize = 8_192;
-const MAX_ITEMS: usize = 512;
+// These constants are defined by the chrome.storage.sync spec. We export them
+// publicly from this module, then from the crate, so they wind up in the
+// clients.
+// Note the limits for `chrome.storage.sync` and `chrome.storage.local` are
+// different, and these are from `.sync` - we'll have work to do if we end up
+// wanting this to be used for `.local` too!
+pub const SYNC_QUOTA_BYTES: usize = 102_400;
+pub const SYNC_QUOTA_BYTES_PER_ITEM: usize = 8_192;
+pub const SYNC_MAX_ITEMS: usize = 512;
 // Note there are also constants for "operations per minute" etc, which aren't
 // enforced here.
 
 type JsonMap = Map<String, JsonValue>;
 
 fn get_from_db(conn: &Connection, ext_id: &str) -> Result<Option<JsonMap>> {
     Ok(
         match conn.try_query_one::<String>(
@@ -76,17 +81,17 @@ fn save_to_db(tx: &Transaction<'_>, ext_
                 rusqlite::named_params! {
                     ":ext_id": ext_id,
                 },
             )?;
         }
     } else {
         // Convert to bytes so we can enforce the quota.
         let sval = val.to_string();
-        if sval.len() > QUOTA_BYTES {
+        if sval.len() > SYNC_QUOTA_BYTES {
             return Err(ErrorKind::QuotaError(QuotaReason::TotalBytes).into());
         }
         log::trace!("saving data for '{}': writing", ext_id);
         tx.execute_named_cached(
             "INSERT INTO storage_sync_data(ext_id, data, sync_change_counter)
                 VALUES (:ext_id, :data, 1)
                 ON CONFLICT (ext_id) DO UPDATE
                 set data=:data, sync_change_counter = sync_change_counter + 1",
@@ -156,16 +161,24 @@ impl Serialize for StorageChanges {
         let mut map = serializer.serialize_map(Some(self.changes.len()))?;
         for change in &self.changes {
             map.serialize_entry(&change.key, change)?;
         }
         map.end()
     }
 }
 
+// A helper to determine the size of a key/value combination from the
+// perspective of quota and getBytesInUse().
+pub fn get_quota_size_of(key: &str, v: &JsonValue) -> usize {
+    // Reading the chrome docs literally re the quota, the length of the key
+    // is just the string len, but the value is the json val, as bytes.
+    key.len() + v.to_string().len()
+}
+
 /// The implementation of `storage[.sync].set()`. On success this returns the
 /// StorageChanges defined by the chrome API - it's assumed the caller will
 /// arrange to deliver this to observers as defined in that API.
 pub fn set(tx: &Transaction<'_>, ext_id: &str, val: JsonValue) -> Result<StorageChanges> {
     let val_map = match val {
         JsonValue::Object(m) => m,
         // Not clear what the error semantics should be yet. For now, pretend an empty map.
         _ => Map::new(),
@@ -173,22 +186,22 @@ pub fn set(tx: &Transaction<'_>, ext_id:
 
     let mut current = get_from_db(tx, ext_id)?.unwrap_or_default();
 
     let mut changes = StorageChanges::with_capacity(val_map.len());
 
     // iterate over the value we are adding/updating.
     for (k, v) in val_map.into_iter() {
         let old_value = current.remove(&k);
-        if current.len() >= MAX_ITEMS {
+        if current.len() >= SYNC_MAX_ITEMS {
             return Err(ErrorKind::QuotaError(QuotaReason::MaxItems).into());
         }
         // Reading the chrome docs literally re the quota, the length of the key
         // is just the string len, but the value is the json val, as bytes
-        if k.len() + v.to_string().len() >= QUOTA_BYTES_PER_ITEM {
+        if get_quota_size_of(&k, &v) > SYNC_QUOTA_BYTES_PER_ITEM {
             return Err(ErrorKind::QuotaError(QuotaReason::ItemBytes).into());
         }
         let change = StorageValueChange {
             key: k.clone(),
             old_value,
             new_value: Some(v.clone()),
         };
         changes.push(change);
@@ -289,33 +302,41 @@ pub fn clear(tx: &Transaction<'_>, ext_i
             new_value: None,
             old_value: Some(val),
         });
     }
     remove_from_db(tx, ext_id)?;
     Ok(result)
 }
 
-/// While this API isn't available to extensions, Firefox wants a way to wipe
-/// all data for all addons but not sync the deletions. We also don't report
-/// the changes caused by the deletion.
-/// That means that after doing this, the next sync is likely to drag some data
-/// back in - which is fine.
-/// This is much like what the sync support for other components calls a "wipe",
-/// so we name it similarly.
-pub fn wipe_all(tx: &Transaction<'_>) -> Result<()> {
-    // We assume the meta table is only used by sync.
-    tx.execute_batch(
-        "DELETE FROM storage_sync_data; DELETE FROM storage_sync_mirror; DELETE FROM meta;",
-    )?;
-    Ok(())
+/// The implementation of `storage[.sync].getBytesInUse()`.
+pub fn get_bytes_in_use(conn: &Connection, ext_id: &str, keys: JsonValue) -> Result<usize> {
+    let maybe_existing = get_from_db(conn, ext_id)?;
+    let existing = match maybe_existing {
+        None => return Ok(0),
+        Some(v) => v,
+    };
+    // Make an array of all the keys we are going to count.
+    let keys: Vec<&str> = match &keys {
+        JsonValue::Null => existing.keys().map(|v| v.as_str()).collect(),
+        JsonValue::String(name) => vec![name.as_str()],
+        JsonValue::Array(names) => names.iter().filter_map(|v| v.as_str()).collect(),
+        // in the spirit of json-based APIs, silently ignore strange things.
+        _ => return Ok(0),
+    };
+    // We must use the same way of counting as our quota enforcement.
+    let mut size = 0;
+    for key in keys.into_iter() {
+        if let Some(v) = existing.get(key) {
+            size += get_quota_size_of(key, &v);
+        }
+    }
+    Ok(size)
 }
 
-// TODO - get_bytes_in_use()
-
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::db::test::new_mem_db;
     use serde_json::json;
 
     #[test]
     fn test_serialize_storage_changes() -> Result<()> {
@@ -504,17 +525,17 @@ mod tests {
         Ok(())
     }
 
     #[test]
     fn test_quota_maxitems() -> Result<()> {
         let mut db = new_mem_db();
         let tx = db.transaction()?;
         let ext_id = "xyz";
-        for i in 1..MAX_ITEMS + 1 {
+        for i in 1..SYNC_MAX_ITEMS + 1 {
             set(
                 &tx,
                 &ext_id,
                 json!({ format!("key-{}", i): format!("value-{}", i) }),
             )?;
         }
         let e = set(&tx, &ext_id, json!({"another": "another"})).unwrap_err();
         match e.kind() {
@@ -525,56 +546,60 @@ mod tests {
     }
 
     #[test]
     fn test_quota_bytesperitem() -> Result<()> {
         let mut db = new_mem_db();
         let tx = db.transaction()?;
         let ext_id = "xyz";
         // A string 5 bytes less than the max. This should be counted as being
-        // 3 bytes less than the max as the quotes are counted.
-        let val = "x".repeat(QUOTA_BYTES_PER_ITEM - 5);
+        // 3 bytes less than the max as the quotes are counted. Plus the length
+        // of the key (no quotes) means we should come in 2 bytes under.
+        let val = "x".repeat(SYNC_QUOTA_BYTES_PER_ITEM - 5);
 
         // Key length doesn't push it over.
         set(&tx, &ext_id, json!({ "x": val }))?;
+        assert_eq!(
+            get_bytes_in_use(&tx, &ext_id, json!("x"))?,
+            SYNC_QUOTA_BYTES_PER_ITEM - 2
+        );
 
         // Key length does push it over.
         let e = set(&tx, &ext_id, json!({ "xxxx": val })).unwrap_err();
         match e.kind() {
             ErrorKind::QuotaError(QuotaReason::ItemBytes) => {}
             _ => panic!("unexpected error type"),
         };
         Ok(())
     }
 
-    fn query_count(conn: &Connection, table: &str) -> u32 {
-        conn.query_row_and_then(
-            &format!("SELECT COUNT(*) FROM {};", table),
-            rusqlite::NO_PARAMS,
-            |row| row.get::<_, u32>(0),
-        )
-        .expect("should work")
-    }
-
     #[test]
-    fn test_wipe() -> Result<()> {
-        use crate::db::put_meta;
-
+    fn test_get_bytes_in_use() -> Result<()> {
         let mut db = new_mem_db();
         let tx = db.transaction()?;
-        set(&tx, "ext-a", json!({ "x": "y" }))?;
-        set(&tx, "ext-b", json!({ "y": "x" }))?;
-        put_meta(&tx, "meta", &"meta-meta".to_string())?;
-        tx.execute(
-            "INSERT INTO storage_sync_mirror (guid, ext_id, data)
-                    VALUES ('guid', 'ext-a', null)",
-            rusqlite::NO_PARAMS,
-        )?;
-        assert_eq!(query_count(&tx, "storage_sync_data"), 2);
-        assert_eq!(query_count(&tx, "storage_sync_mirror"), 1);
-        assert_eq!(query_count(&tx, "meta"), 1);
-        wipe_all(&tx)?;
-        assert_eq!(query_count(&tx, "storage_sync_data"), 0);
-        assert_eq!(query_count(&tx, "storage_sync_mirror"), 0);
-        assert_eq!(query_count(&tx, "meta"), 0);
+        let ext_id = "xyz";
+
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!(null))?, 0);
+
+        set(&tx, &ext_id, json!({ "a": "a" }))?; // should be 4
+        set(&tx, &ext_id, json!({ "b": "bb" }))?; // should be 5
+        set(&tx, &ext_id, json!({ "c": "ccc" }))?; // should be 6
+        set(&tx, &ext_id, json!({ "n": 999_999 }))?; // should be 7
+
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!("x"))?, 0);
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!("a"))?, 4);
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!("b"))?, 5);
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!("c"))?, 6);
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!("n"))?, 7);
+
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!(["a"]))?, 4);
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!(["a", "x"]))?, 4);
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!(["a", "b"]))?, 9);
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!(["a", "c"]))?, 10);
+
+        assert_eq!(
+            get_bytes_in_use(&tx, &ext_id, json!(["a", "b", "c", "n"]))?,
+            22
+        );
+        assert_eq!(get_bytes_in_use(&tx, &ext_id, json!(null))?, 22);
         Ok(())
     }
 }
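
For reference, a minimal standalone sketch of the quota-accounting rule that the new `get_quota_size_of` encodes and that `get_bytes_in_use` and the tests above rely on: a key counts as its raw string length, while a value counts as the length of its JSON serialization, so string values include their surrounding quotes. The local `quota_size_of` helper and `main` wrapper here are illustrative, not part of the patch.

```rust
use serde_json::{json, Value};

// Same rule as the patch's get_quota_size_of: raw key length plus the
// length of the value serialized as JSON.
fn quota_size_of(key: &str, v: &Value) -> usize {
    key.len() + v.to_string().len()
}

fn main() {
    // Matches the "should be N" comments in test_get_bytes_in_use above.
    assert_eq!(quota_size_of("a", &json!("a")), 4); // 1 + r#""a""#.len()
    assert_eq!(quota_size_of("b", &json!("bb")), 5);
    assert_eq!(quota_size_of("c", &json!("ccc")), 6);
    assert_eq!(quota_size_of("n", &json!(999_999)), 7); // 1 + "999999".len()
}
```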
--- a/third_party/rust/webext-storage/src/db.rs
+++ b/third_party/rust/webext-storage/src/db.rs
@@ -167,36 +167,33 @@ pub(crate) mod sql_fns {
 
     #[inline(never)]
     pub fn generate_guid(_ctx: &Context<'_>) -> Result<SyncGuid> {
         Ok(SyncGuid::random())
     }
 }
 
 // These should be somewhere else...
-#[allow(dead_code)]
 pub fn put_meta(db: &Connection, key: &str, value: &dyn ToSql) -> Result<()> {
     db.conn().execute_named_cached(
         "REPLACE INTO meta (key, value) VALUES (:key, :value)",
         &[(":key", &key), (":value", value)],
     )?;
     Ok(())
 }
 
-#[allow(dead_code)]
 pub fn get_meta<T: FromSql>(db: &Connection, key: &str) -> Result<Option<T>> {
     let res = db.conn().try_query_one(
         "SELECT value FROM meta WHERE key = :key",
         &[(":key", &key)],
         true,
     )?;
     Ok(res)
 }
 
-#[allow(dead_code)]
 pub fn delete_meta(db: &Connection, key: &str) -> Result<()> {
     db.conn()
         .execute_named_cached("DELETE FROM meta WHERE key = :key", &[(":key", &key)])?;
     Ok(())
 }
 
 // Utilities for working with paths.
 // (From places_utils - ideally these would be shared, but the use of
--- a/third_party/rust/webext-storage/src/error.rs
+++ b/third_party/rust/webext-storage/src/error.rs
@@ -51,19 +51,16 @@ pub enum ErrorKind {
     #[fail(display = "Database cannot be upgraded")]
     DatabaseUpgradeError,
 
     #[fail(display = "Database version {} is not supported", _0)]
     UnsupportedDatabaseVersion(i64),
 
     #[fail(display = "{}", _0)]
     IncomingPayloadError(#[fail(cause)] bridged_engine::PayloadError),
-
-    #[fail(display = "This operation isn't implemented yet")]
-    NotImplemented,
 }
 
 error_support::define_error! {
     ErrorKind {
         (JsonError, serde_json::Error),
         (SqlError, rusqlite::Error),
         (IoError, std::io::Error),
         (InterruptedError, Interrupted),
--- a/third_party/rust/webext-storage/src/lib.rs
+++ b/third_party/rust/webext-storage/src/lib.rs
@@ -7,23 +7,14 @@
 
 mod api;
 mod db;
 pub mod error;
 mod schema;
 pub mod store;
 mod sync;
 
-// This is what we roughly expect the "bridge" used by desktop to do.
-// It's primarily here to avoid dead-code warnings (but I don't want to disable
-// those warning, as stuff that remains after this is suspect!)
-pub fn delme_demo_usage() -> error::Result<()> {
-    use serde_json::json;
+// We publish some constants from non-public modules.
+pub use sync::STORAGE_VERSION;
 
-    let store = store::Store::new("webext-storage.db")?;
-    store.set("ext-id", json!({}))?;
-    store.get("ext-id", json!({}))?;
-    store.remove("ext-id", json!({}))?;
-    store.clear("ext-id")?;
-    // and it might even...
-    store.wipe_all()?;
-    Ok(())
-}
+pub use api::SYNC_MAX_ITEMS;
+pub use api::SYNC_QUOTA_BYTES;
+pub use api::SYNC_QUOTA_BYTES_PER_ITEM;
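
A hypothetical consumer of the new re-exports; only the constant names come from the patch, the helper function is illustrative:

```rust
use webext_storage::{SYNC_MAX_ITEMS, SYNC_QUOTA_BYTES, SYNC_QUOTA_BYTES_PER_ITEM};

// Reject a write before touching the database if it obviously exceeds the
// per-item quota. This helper is not part of the patch.
fn obviously_too_big(key: &str, serialized_value: &str) -> bool {
    key.len() + serialized_value.len() > SYNC_QUOTA_BYTES_PER_ITEM
}

fn main() {
    assert!(SYNC_QUOTA_BYTES_PER_ITEM < SYNC_QUOTA_BYTES);
    assert_eq!(SYNC_MAX_ITEMS, 512);
    assert!(!obviously_too_big("key", "value"));
}
```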
--- a/third_party/rust/webext-storage/src/store.rs
+++ b/third_party/rust/webext-storage/src/store.rs
@@ -95,23 +95,20 @@ impl Store {
     /// deleted key.
     pub fn clear(&self, ext_id: &str) -> Result<StorageChanges> {
         let tx = self.db.unchecked_transaction()?;
         let result = api::clear(&tx, ext_id)?;
         tx.commit()?;
         Ok(result)
     }
 
-    /// Wipe all local data without syncing or returning any information about
-    /// the deletion.
-    pub fn wipe_all(&self) -> Result<()> {
-        let tx = self.db.unchecked_transaction()?;
-        api::wipe_all(&tx)?;
-        tx.commit()?;
-        Ok(())
+    /// Returns the bytes in use for the specified items (which can be null,
+    /// a string, or an array)
+    pub fn get_bytes_in_use(&self, ext_id: &str, keys: JsonValue) -> Result<usize> {
+        api::get_bytes_in_use(&self.db, ext_id, keys)
     }
 
     /// Returns a bridged sync engine for Desktop for this store.
     pub fn bridged_engine(&self) -> sync::BridgedEngine<'_> {
         sync::BridgedEngine::new(&self.db)
     }
 
     /// Closes the store and its database connection. See the docs for
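
A hypothetical call site for the new `Store::get_bytes_in_use`; the database path and extension id are placeholders, while `Store::new`, the `Result` alias, and the method signature are as used elsewhere in this crate:

```rust
use serde_json::json;
use webext_storage::error::Result;
use webext_storage::store::Store;

fn bytes_for_ext(db_path: &str, ext_id: &str) -> Result<usize> {
    let store = Store::new(db_path)?;
    // `null` means "count every key"; a string or an array of strings
    // restricts the count to those keys, as the doc comment describes.
    store.get_bytes_in_use(ext_id, json!(null))
}
```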
--- a/third_party/rust/webext-storage/src/sync/bridge.rs
+++ b/third_party/rust/webext-storage/src/sync/bridge.rs
@@ -1,58 +1,87 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+use rusqlite::Transaction;
 use sync15_traits::{self, ApplyResults, IncomingEnvelope, OutgoingEnvelope};
 use sync_guid::Guid as SyncGuid;
 
-use crate::api;
-use crate::db::StorageDb;
-use crate::error::{Error, ErrorKind, Result};
+use crate::db::{delete_meta, get_meta, put_meta, StorageDb};
+use crate::error::{Error, Result};
 use crate::schema;
 use crate::sync::incoming::{apply_actions, get_incoming, plan_incoming, stage_incoming};
 use crate::sync::outgoing::{get_outgoing, record_uploaded, stage_outgoing};
 
+const LAST_SYNC_META_KEY: &str = "last_sync_time";
+const SYNC_ID_META_KEY: &str = "sync_id";
+
 /// A bridged engine implements all the methods needed to make the
 /// `storage.sync` store work with Desktop's Sync implementation.
 /// Conceptually, it's similar to `sync15_traits::Store`, which we
 /// should eventually rename and unify with this trait (#2841).
 pub struct BridgedEngine<'a> {
     db: &'a StorageDb,
 }
 
 impl<'a> BridgedEngine<'a> {
     /// Creates a bridged engine for syncing.
     pub fn new(db: &'a StorageDb) -> Self {
         BridgedEngine { db }
     }
+
+    fn do_reset(&self, tx: &Transaction<'_>) -> Result<()> {
+        tx.execute_batch(
+            "DELETE FROM storage_sync_mirror;
+             UPDATE storage_sync_data SET sync_change_counter = 1;",
+        )?;
+        delete_meta(tx, LAST_SYNC_META_KEY)?;
+        Ok(())
+    }
 }
 
 impl<'a> sync15_traits::BridgedEngine for BridgedEngine<'a> {
     type Error = Error;
 
     fn last_sync(&self) -> Result<i64> {
-        Err(ErrorKind::NotImplemented.into())
+        Ok(get_meta(self.db, LAST_SYNC_META_KEY)?.unwrap_or(0))
     }
 
-    fn set_last_sync(&self, _last_sync_millis: i64) -> Result<()> {
-        Err(ErrorKind::NotImplemented.into())
+    fn set_last_sync(&self, last_sync_millis: i64) -> Result<()> {
+        put_meta(self.db, LAST_SYNC_META_KEY, &last_sync_millis)?;
+        Ok(())
     }
 
     fn sync_id(&self) -> Result<Option<String>> {
-        Err(ErrorKind::NotImplemented.into())
+        Ok(get_meta(self.db, SYNC_ID_META_KEY)?)
     }
 
     fn reset_sync_id(&self) -> Result<String> {
-        Err(ErrorKind::NotImplemented.into())
+        let tx = self.db.unchecked_transaction()?;
+        let new_id = SyncGuid::random().to_string();
+        self.do_reset(&tx)?;
+        put_meta(self.db, SYNC_ID_META_KEY, &new_id)?;
+        tx.commit()?;
+        Ok(new_id)
     }
 
-    fn ensure_current_sync_id(&self, _new_sync_id: &str) -> Result<String> {
-        Err(ErrorKind::NotImplemented.into())
+    fn ensure_current_sync_id(&self, sync_id: &str) -> Result<String> {
+        let current: Option<String> = get_meta(self.db, SYNC_ID_META_KEY)?;
+        Ok(match current {
+            Some(current) if current == sync_id => current,
+            _ => {
+                let tx = self.db.unchecked_transaction()?;
+                self.do_reset(&tx)?;
+                let result = sync_id.to_string();
+                put_meta(self.db, SYNC_ID_META_KEY, &result)?;
+                tx.commit()?;
+                result
+            }
+        })
     }
 
     fn sync_started(&self) -> Result<()> {
         schema::create_empty_sync_temp_tables(&self.db)?;
         Ok(())
     }
 
     fn store_incoming(&self, incoming_envelopes: &[IncomingEnvelope]) -> Result<()> {
@@ -100,18 +129,192 @@ impl<'a> sync15_traits::BridgedEngine fo
     }
 
     fn sync_finished(&self) -> Result<()> {
         schema::create_empty_sync_temp_tables(&self.db)?;
         Ok(())
     }
 
     fn reset(&self) -> Result<()> {
-        Err(ErrorKind::NotImplemented.into())
+        let tx = self.db.unchecked_transaction()?;
+        self.do_reset(&tx)?;
+        delete_meta(&tx, SYNC_ID_META_KEY)?;
+        tx.commit()?;
+        Ok(())
     }
 
     fn wipe(&self) -> Result<()> {
         let tx = self.db.unchecked_transaction()?;
-        api::wipe_all(&tx)?;
+        // We assume the meta table is only used by sync.
+        tx.execute_batch(
+            "DELETE FROM storage_sync_data; DELETE FROM storage_sync_mirror; DELETE FROM meta;",
+        )?;
         tx.commit()?;
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::db::test::new_mem_db;
+    use sync15_traits::bridged_engine::BridgedEngine;
+
+    fn query_count(conn: &StorageDb, table: &str) -> u32 {
+        conn.query_row_and_then(
+            &format!("SELECT COUNT(*) FROM {};", table),
+            rusqlite::NO_PARAMS,
+            |row| row.get::<_, u32>(0),
+        )
+        .expect("should work")
+    }
+
+    // Sets up mock data for the tests here.
+    fn setup_mock_data(engine: &super::BridgedEngine<'_>) -> Result<()> {
+        engine.db.execute(
+            "INSERT INTO storage_sync_data (ext_id, data, sync_change_counter)
+                 VALUES ('ext-a', 'invalid-json', 2)",
+            rusqlite::NO_PARAMS,
+        )?;
+        engine.db.execute(
+            "INSERT INTO storage_sync_mirror (guid, ext_id, data)
+                 VALUES ('guid', 'ext-a', null)",
+            rusqlite::NO_PARAMS,
+        )?;
+        engine.set_last_sync(1)?;
+
+        // and assert we wrote what we think we did.
+        assert_eq!(query_count(engine.db, "storage_sync_data"), 1);
+        assert_eq!(query_count(engine.db, "storage_sync_mirror"), 1);
+        assert_eq!(query_count(engine.db, "meta"), 1);
+        Ok(())
+    }
+
+    // Assuming a DB setup with setup_mock_data, assert it was correctly reset.
+    fn assert_reset(engine: &super::BridgedEngine<'_>) -> Result<()> {
+        // A reset never wipes data...
+        assert_eq!(query_count(engine.db, "storage_sync_data"), 1);
+
+        // But did reset the change counter.
+        let cc = engine.db.query_row_and_then(
+            "SELECT sync_change_counter FROM storage_sync_data WHERE ext_id = 'ext-a';",
+            rusqlite::NO_PARAMS,
+            |row| row.get::<_, u32>(0),
+        )?;
+        assert_eq!(cc, 1);
+        // But did wipe the mirror...
+        assert_eq!(query_count(engine.db, "storage_sync_mirror"), 0);
+        // And the last_sync should have been wiped.
+        assert!(get_meta::<i64>(engine.db, LAST_SYNC_META_KEY)?.is_none());
+        Ok(())
+    }
+
+    // Assuming a DB setup with setup_mock_data, assert it has not been reset.
+    fn assert_not_reset(engine: &super::BridgedEngine<'_>) -> Result<()> {
+        assert_eq!(query_count(engine.db, "storage_sync_data"), 1);
+        let cc = engine.db.query_row_and_then(
+            "SELECT sync_change_counter FROM storage_sync_data WHERE ext_id = 'ext-a';",
+            rusqlite::NO_PARAMS,
+            |row| row.get::<_, u32>(0),
+        )?;
+        assert_eq!(cc, 2);
+        assert_eq!(query_count(engine.db, "storage_sync_mirror"), 1);
+        // And the last_sync should remain.
+        assert!(get_meta::<i64>(engine.db, LAST_SYNC_META_KEY)?.is_some());
+        Ok(())
+    }
+
+    #[test]
+    fn test_wipe() -> Result<()> {
+        let db = new_mem_db();
+        let engine = super::BridgedEngine::new(&db);
+
+        setup_mock_data(&engine)?;
+
+        engine.wipe()?;
+        assert_eq!(query_count(engine.db, "storage_sync_data"), 0);
+        assert_eq!(query_count(engine.db, "storage_sync_mirror"), 0);
+        assert_eq!(query_count(engine.db, "meta"), 0);
+        Ok(())
+    }
+
+    #[test]
+    fn test_reset() -> Result<()> {
+        let db = new_mem_db();
+        let engine = super::BridgedEngine::new(&db);
+
+        setup_mock_data(&engine)?;
+        put_meta(engine.db, SYNC_ID_META_KEY, &"sync-id".to_string())?;
+
+        engine.reset()?;
+        assert_reset(&engine)?;
+        // Only an explicit reset kills the sync-id, so check that here.
+        assert_eq!(get_meta::<String>(engine.db, SYNC_ID_META_KEY)?, None);
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_ensure_missing_sync_id() -> Result<()> {
+        let db = new_mem_db();
+        let engine = super::BridgedEngine::new(&db);
+
+        setup_mock_data(&engine)?;
+
+        assert_eq!(engine.sync_id()?, None);
+        // We don't have a sync ID - so setting one should reset.
+        engine.ensure_current_sync_id("new-id")?;
+        // should have caused a reset.
+        assert_reset(&engine)?;
+        Ok(())
+    }
+
+    #[test]
+    fn test_ensure_new_sync_id() -> Result<()> {
+        let db = new_mem_db();
+        let engine = super::BridgedEngine::new(&db);
+
+        setup_mock_data(&engine)?;
+
+        put_meta(engine.db, SYNC_ID_META_KEY, &"old-id".to_string())?;
+        assert_not_reset(&engine)?;
+        assert_eq!(engine.sync_id()?, Some("old-id".to_string()));
+
+        engine.ensure_current_sync_id("new-id")?;
+        // should have caused a reset.
+        assert_reset(&engine)?;
+        // should have the new id.
+        assert_eq!(engine.sync_id()?, Some("new-id".to_string()));
+        Ok(())
+    }
+
+    #[test]
+    fn test_ensure_same_sync_id() -> Result<()> {
+        let db = new_mem_db();
+        let engine = super::BridgedEngine::new(&db);
+
+        setup_mock_data(&engine)?;
+        assert_not_reset(&engine)?;
+
+        put_meta(engine.db, SYNC_ID_META_KEY, &"sync-id".to_string())?;
+
+        engine.ensure_current_sync_id("sync-id")?;
+        // should not have reset.
+        assert_not_reset(&engine)?;
+        Ok(())
+    }
+
+    #[test]
+    fn test_reset_sync_id() -> Result<()> {
+        let db = new_mem_db();
+        let engine = super::BridgedEngine::new(&db);
+
+        setup_mock_data(&engine)?;
+        put_meta(engine.db, SYNC_ID_META_KEY, &"sync-id".to_string())?;
+
+        assert_eq!(engine.sync_id()?, Some("sync-id".to_string()));
+        let new_id = engine.reset_sync_id()?;
+        // should have caused a reset.
+        assert_reset(&engine)?;
+        assert_eq!(engine.sync_id()?, Some(new_id));
+        Ok(())
+    }
+}
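
The sync-id handshake implemented above follows a small protocol: a matching id is kept as-is, anything else forces a reset before the server's id is adopted, and an explicit `reset()` additionally forgets the id. A compact model of that protocol over an in-memory map standing in for the real meta table; the key names mirror `LAST_SYNC_META_KEY` and `SYNC_ID_META_KEY`, everything else is illustrative:

```rust
use std::collections::HashMap;

// Stand-in for the meta table.
struct Meta(HashMap<&'static str, String>);

impl Meta {
    fn ensure_current_sync_id(&mut self, sync_id: &str) -> String {
        match self.0.get("sync_id") {
            Some(current) if current == sync_id => current.clone(),
            _ => {
                // Mismatch, or no id yet: reset (the real engine drops
                // last-sync and re-dirties local data), then adopt the id.
                self.0.remove("last_sync_time");
                self.0.insert("sync_id", sync_id.to_string());
                sync_id.to_string()
            }
        }
    }

    fn reset(&mut self) {
        self.0.remove("last_sync_time");
        self.0.remove("sync_id");
    }
}

fn main() {
    let mut m = Meta(HashMap::new());
    assert_eq!(m.ensure_current_sync_id("abc"), "abc");
    assert_eq!(m.ensure_current_sync_id("abc"), "abc"); // no reset this time
    m.reset();
    assert!(m.0.get("sync_id").is_none());
}
```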
--- a/third_party/rust/webext-storage/src/sync/incoming.rs
+++ b/third_party/rust/webext-storage/src/sync/incoming.rs
@@ -8,17 +8,17 @@
 use interrupt_support::Interruptee;
 use rusqlite::{types::ToSql, Connection, Row, Transaction};
 use sql_support::ConnExt;
 use sync15_traits::Payload;
 use sync_guid::Guid as SyncGuid;
 
 use crate::error::*;
 
-use super::{merge, JsonMap, Record};
+use super::{merge, remove_matching_keys, JsonMap, Record};
 
 /// The state data can be in. Could be represented as Option<JsonMap>, but this
 /// is clearer and independent of how the data is stored.
 #[derive(Debug, PartialEq)]
 pub enum DataState {
     /// The data was deleted.
     Deleted,
     /// Data exists, as stored in the map.
@@ -211,44 +211,68 @@ pub fn plan_incoming(s: IncomingState) -
                     merge(incoming_data, local_data, None)
                 }
                 (DataState::Exists(incoming_data), DataState::Deleted, _) => {
                     // Incoming data, removed locally. Server wins.
                     IncomingAction::TakeRemote {
                         data: incoming_data,
                     }
                 }
-                (DataState::Deleted, _, _) => {
-                    // Deleted remotely. Server wins.
-                    // XXX - WRONG - we want to 3 way merge here still!
-                    // Eg, final key removed remotely, different key added
-                    // locally, the new key should still be added.
+                (DataState::Deleted, DataState::Exists(local_data), DataState::Exists(mirror)) => {
+                    // Deleted remotely.
+                    // Treat this as a delete of every key that we
+                    // know was present at the time.
+                    let result = remove_matching_keys(local_data, &mirror);
+                    if result.is_empty() {
+                        // If there were no more keys left, we can
+                        // delete our version too.
+                        IncomingAction::DeleteLocally
+                    } else {
+                        IncomingAction::Merge { data: result }
+                    }
+                }
+                (DataState::Deleted, DataState::Exists(local_data), DataState::Deleted) => {
+                    // Perhaps another client created and then deleted
+                    // the whole object for this extension since the
+                    // last time we synced.
+                    // Treat this as a delete of every key that we
+                    // knew was present. Unfortunately, we don't know
+                    // any keys that were present, so we delete no keys.
+                    IncomingAction::Merge { data: local_data }
+                }
+                (DataState::Deleted, DataState::Deleted, _) => {
+                    // We agree with the remote (regardless of what we
+                    // have mirrored).
                     IncomingAction::DeleteLocally
                 }
             }
         }
         IncomingState::HasLocal { incoming, local } => {
             // So we have a local record and an incoming/staging record, but *not* a
             // mirror record. This means some other device has synced this for
             // the first time and we are yet to do the same.
             match (incoming, local) {
                 (DataState::Exists(incoming_data), DataState::Exists(local_data)) => {
                     // This means the extension exists locally and remotely
                     // but this is the first time we've synced it. That's no problem, it's
                     // just a 2-way merge...
                     merge(incoming_data, local_data, None)
                 }
-                (DataState::Exists(_), DataState::Deleted) => {
+                (DataState::Deleted, DataState::Exists(local_data)) => {
                     // We've data locally, but there's an incoming deletion.
-                    // Remote wins.
-                    IncomingAction::DeleteLocally
+                    // We would normally remove keys that we knew were
+                    // present on the server, but we don't know what
+                    // was on the server, so we don't remove anything.
+                    IncomingAction::Merge { data: local_data }
                 }
-                (DataState::Deleted, DataState::Exists(local_data)) => {
+                (DataState::Exists(incoming_data), DataState::Deleted) => {
                     // No data locally, but some is incoming - take it.
-                    IncomingAction::TakeRemote { data: local_data }
+                    IncomingAction::TakeRemote {
+                        data: incoming_data,
+                    }
                 }
                 (DataState::Deleted, DataState::Deleted) => {
                     // Nothing anywhere - odd, but OK.
                     IncomingAction::Same
                 }
             }
         }
         IncomingState::NotLocal { incoming, .. } => {
@@ -287,21 +311,22 @@ pub fn apply_actions(
                 tx.execute_named_cached(
                     "DELETE FROM storage_sync_data WHERE ext_id = :ext_id",
                     &[(":ext_id", &item.ext_id)],
                 )?;
             }
             // We want to update the local record with 'data' and after this update the item no longer is considered dirty.
             IncomingAction::TakeRemote { data } => {
                 tx.execute_named_cached(
-                    "UPDATE storage_sync_data SET data = :data, sync_change_counter = 0 WHERE ext_id = :ext_id",
+                    "INSERT OR REPLACE INTO storage_sync_data(ext_id, data, sync_change_counter)
+                        VALUES (:ext_id, :data, 0)",
                     &[
                         (":ext_id", &item.ext_id),
                         (":data", &serde_json::Value::Object(data)),
-                    ]
+                    ],
                 )?;
             }
 
             // We merged this data, so need to update locally but still consider
             // it dirty because the merged data must be uploaded.
             IncomingAction::Merge { data } => {
                 tx.execute_named_cached(
                     "UPDATE storage_sync_data SET data = :data, sync_change_counter = sync_change_counter + 1 WHERE ext_id = :ext_id",
--- a/third_party/rust/webext-storage/src/sync/mod.rs
+++ b/third_party/rust/webext-storage/src/sync/mod.rs
@@ -12,16 +12,18 @@ mod sync_tests;
 use serde_derive::*;
 use sync_guid::Guid as SyncGuid;
 
 pub use bridge::BridgedEngine;
 use incoming::IncomingAction;
 
 type JsonMap = serde_json::Map<String, serde_json::Value>;
 
+pub const STORAGE_VERSION: usize = 1;
+
 /// For use with `#[serde(skip_serializing_if = )]`
 #[inline]
 pub fn is_default<T: PartialEq + Default>(v: &T) -> bool {
     *v == T::default()
 }
 
 #[derive(Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
@@ -29,68 +31,76 @@ pub struct Record {
     #[serde(rename = "id")]
     guid: SyncGuid,
     ext_id: String,
     #[serde(default, skip_serializing_if = "is_default")]
     data: Option<String>,
 }
 
 // Perform a 2-way or 3-way merge, where the incoming value wins on conflict.
-// XXX - this needs more thought, and probably needs significant changes.
-// Main problem is that it doesn't handle deletions - but to do that, we need
-// something other than a simple Option<JsonMap> - we need to differentiate
-// "doesn't exist" from "removed".
-// TODO!
-fn merge(other: JsonMap, mut ours: JsonMap, parent: Option<JsonMap>) -> IncomingAction {
+fn merge(mut other: JsonMap, mut ours: JsonMap, parent: Option<JsonMap>) -> IncomingAction {
     if other == ours {
         return IncomingAction::Same;
     }
-    // Server wins. Iterate over incoming - if incoming and the parent are
-    // identical, then we will take our local value.
-    for (key, incoming_value) in other.into_iter() {
-        let our_value = ours.get(&key);
-        match our_value {
-            Some(our_value) => {
-                if *our_value != incoming_value {
-                    // So we have a discrepency between 'ours' and 'other' - use parent
-                    // to resolve.
-                    let can_take_local = match parent {
-                        Some(ref pm) => {
-                            if let Some(pv) = pm.get(&key) {
-                                // parent has a value - we can only take our local
-                                // value if the parent and incoming have the same.
-                                *pv == incoming_value
-                            } else {
-                                // Value doesn't exist in the parent - can't take local
-                                false
-                            }
-                        }
-                        None => {
-                            // 2 way merge because there's no parent. We always
-                            // prefer incoming here.
-                            false
-                        }
-                    };
-                    if can_take_local {
-                        log::trace!("merge: no remote change in key {} - taking local", key);
-                    } else {
-                        log::trace!("merge: conflict in existing key {} - taking remote", key);
-                        ours.insert(key, incoming_value);
-                    }
-                } else {
-                    log::trace!("merge: local and incoming same for key {}", key);
+    let old_incoming = other.clone();
+    if let Some(parent) = parent {
+        // Perform 3-way merge. First, for every key in parent,
+        // compare the parent value with the incoming value to compute
+        // an implicit "diff".
+        for (key, parent_value) in parent.into_iter() {
+            if let Some(incoming_value) = other.remove(&key) {
+                if incoming_value != parent_value {
+                    log::trace!(
+                        "merge: key {} was updated in incoming - copying value locally",
+                        key
+                    );
+                    ours.insert(key, incoming_value);
                 }
-            }
-            None => {
-                log::trace!("merge: incoming new value for key {}", key);
-                ours.insert(key, incoming_value);
+            } else {
+                // Key was not present in incoming value.
+                // Another client must have deleted it.
+                log::trace!(
+                    "merge: key {} no longer present in incoming - removing it locally",
+                    key
+                );
+                ours.remove(&key);
             }
         }
+
+        // Then, go through every remaining key in incoming. These are
+        // the ones where a corresponding key does not exist in
+        // parent, so it is a new key, and we need to add it.
+        for (key, incoming_value) in other.into_iter() {
+            log::trace!(
+                "merge: key {} doesn't occur in parent - copying from incoming",
+                key
+            );
+            ours.insert(key, incoming_value);
+        }
+    } else {
+        // No parent. Server wins. Overwrite every key in ours with
+        // the corresponding value in other.
+        log::trace!("merge: no parent - copying all keys from incoming");
+        for (key, incoming_value) in other.into_iter() {
+            ours.insert(key, incoming_value);
+        }
     }
-    IncomingAction::Merge { data: ours }
+
+    if ours == old_incoming {
+        IncomingAction::TakeRemote { data: old_incoming }
+    } else {
+        IncomingAction::Merge { data: ours }
+    }
+}
+
+fn remove_matching_keys(mut ours: JsonMap, blacklist: &JsonMap) -> JsonMap {
+    for key in blacklist.keys() {
+        ours.remove(key);
+    }
+    ours
 }
 
 // Helpers for tests
 #[cfg(test)]
 pub mod test {
     use crate::db::{test::new_mem_db, StorageDb};
     use crate::schema::create_empty_sync_temp_tables;
 
@@ -153,13 +163,56 @@ mod tests {
                 map!({"other_only": "other", "common": "old_value"}),
                 map!({"ours_only": "ours", "common": "new_value"}),
                 Some(map!({"parent_only": "parent", "common": "old_value"})),
             ),
             IncomingAction::Merge {
                 data: map!({"other_only": "other", "ours_only": "ours", "common": "new_value"})
             }
         );
+        // Field was removed remotely.
+        assert_eq!(
+            merge(
+                map!({"other_only": "other"}),
+                map!({"common": "old_value"}),
+                Some(map!({"common": "old_value"})),
+            ),
+            IncomingAction::TakeRemote {
+                data: map!({"other_only": "other"}),
+            }
+        );
+        // Field was removed remotely but we added another one.
+        assert_eq!(
+            merge(
+                map!({"other_only": "other"}),
+                map!({"common": "old_value", "new_key": "new_value"}),
+                Some(map!({"common": "old_value"})),
+            ),
+            IncomingAction::Merge {
+                data: map!({"other_only": "other", "new_key": "new_value"}),
+            }
+        );
+        // Field was removed both remotely and locally.
+        assert_eq!(
+            merge(
+                map!({}),
+                map!({"new_key": "new_value"}),
+                Some(map!({"common": "old_value"})),
+            ),
+            IncomingAction::Merge {
+                data: map!({"new_key": "new_value"}),
+            }
+        );
         Ok(())
     }
 
-    // XXX - add `fn test_2way_merging() -> Result<()> {`!!
+    #[test]
+    fn test_remove_matching_keys() -> Result<()> {
+        assert_eq!(
+            remove_matching_keys(
+                map!({"key1": "value1", "key2": "value2"}),
+                &map!({"key1": "ignored", "key3": "ignored"})
+            ),
+            map!({"key2": "value2"})
+        );
+        Ok(())
+    }
 }
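
The rewritten merge boils down to: diff the parent against the incoming map, apply that diff to the local map, then copy over any brand-new incoming keys. A condensed model of that rule, simplified from the patch in that it returns the merged map directly rather than classifying the result as `Same`, `TakeRemote`, or `Merge`:

```rust
use serde_json::{Map, Value};

fn three_way(
    mut incoming: Map<String, Value>,
    mut ours: Map<String, Value>,
    parent: Map<String, Value>,
) -> Map<String, Value> {
    for (key, parent_value) in parent {
        match incoming.remove(&key) {
            // Changed remotely: the incoming value wins.
            Some(v) if v != parent_value => {
                ours.insert(key, v);
            }
            // Unchanged remotely: keep whatever we have locally.
            Some(_) => {}
            // Deleted remotely: delete it locally too.
            None => {
                ours.remove(&key);
            }
        }
    }
    // Keys that never existed in the parent are new in incoming: copy them.
    ours.extend(incoming);
    ours
}

fn main() {
    let as_map = |v: Value| v.as_object().unwrap().clone();
    let merged = three_way(
        as_map(serde_json::json!({"other_only": "other"})),
        as_map(serde_json::json!({"common": "old", "ours_only": "ours"})),
        as_map(serde_json::json!({"common": "old"})),
    );
    // "common" was deleted remotely, "other_only" added, "ours_only" kept.
    assert_eq!(
        Value::Object(merged),
        serde_json::json!({"other_only": "other", "ours_only": "ours"})
    );
}
```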
--- a/third_party/rust/webext-storage/src/sync/sync_tests.rs
+++ b/third_party/rust/webext-storage/src/sync/sync_tests.rs
@@ -59,16 +59,25 @@ fn check_finished_with(conn: &Connection
         "SELECT COUNT(*) FROM storage_sync_data WHERE sync_change_counter != 0;",
         rusqlite::NO_PARAMS,
         |row| row.get::<_, u32>(0),
     )?;
     assert_eq!(count, 0);
     Ok(())
 }
 
+fn get_mirror_guid(conn: &Connection, extid: &str) -> Result<String> {
+    let guid = conn.query_row_and_then(
+        "SELECT m.guid FROM storage_sync_mirror m WHERE m.ext_id = ?;",
+        vec![extid],
+        |row| row.get::<_, String>(0),
+    )?;
+    Ok(guid)
+}
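+
+// The guid matters in the tests below: an incoming payload only
+// matches the mirror when it reuses the mirror row's guid, while a
+// made-up guid is treated as a record we have never seen before.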
+
 #[derive(Debug, PartialEq)]
 enum DbData {
     NoRow,
     NullRow,
     Data(String),
 }
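+// (NullRow is how a tombstone shows up: the row exists but its data
+// column is NULL; NoRow means the row is absent entirely - see
+// test_simple_tombstone.)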
 
 impl DbData {
@@ -119,16 +128,33 @@ fn test_simple_outgoing_sync() -> Result
     let data = json!({"key1": "key1-value", "key2": "key2-value"});
     set(&tx, "ext-id", data.clone())?;
     assert_eq!(do_sync(&tx, vec![])?.len(), 1);
     check_finished_with(&tx, "ext-id", data)?;
     Ok(())
 }
 
 #[test]
+fn test_simple_incoming_sync() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    let data = json!({"key1": "key1-value", "key2": "key2-value"});
+    let payload = Payload::from_record(Record {
+        guid: Guid::from("guid"),
+        ext_id: "ext-id".to_string(),
+        data: Some(data.to_string()),
+    })?;
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
+    let key1_from_api = get(&tx, "ext-id", json!("key1"))?;
+    assert_eq!(key1_from_api, json!({"key1": "key1-value"}));
+    check_finished_with(&tx, "ext-id", data)?;
+    Ok(())
+}
+
+#[test]
 fn test_simple_tombstone() -> Result<()> {
     // Tombstones are only kept when the mirror has that record - so first
     // test that, then arrange for the mirror to have the record.
     let mut db = new_syncable_mem_db();
     let tx = db.transaction()?;
     let data = json!({"key1": "key1-value", "key2": "key2-value"});
     set(&tx, "ext-id", data.clone())?;
     assert_eq!(
@@ -149,69 +175,329 @@ fn test_simple_tombstone() -> Result<()>
     // has been removed.
     assert_eq!(do_sync(&tx, vec![])?.len(), 1);
     assert_eq!(get_local_data(&tx, "ext-id"), DbData::NoRow);
     assert_eq!(get_mirror_data(&tx, "ext-id"), DbData::NullRow);
     Ok(())
 }
 
 #[test]
-fn test_merged() -> Result<()> {
+fn test_reconciled() -> Result<()> {
     let mut db = new_syncable_mem_db();
     let tx = db.transaction()?;
     let data = json!({"key1": "key1-value"});
     set(&tx, "ext-id", data)?;
-    // Incoming payload without 'key1' and conflicting for 'key2'
+    // Incoming payload with the same data
+    let payload = Payload::from_record(Record {
+        guid: Guid::from("guid"),
+        ext_id: "ext-id".to_string(),
+        data: Some(json!({"key1": "key1-value"}).to_string()),
+    })?;
+    // Should be no outgoing records as we reconciled.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
+    check_finished_with(&tx, "ext-id", json!({"key1": "key1-value"}))?;
+    Ok(())
+}
+
+/// Tests that we handle things correctly if we get a payload that is
+/// identical to what is in the mirrored table.
+#[test]
+fn test_reconcile_with_null_payload() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    let data = json!({"key1": "key1-value"});
+    set(&tx, "ext-id", data.clone())?;
+    // We try to push this change on the next sync.
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    assert_eq!(
+        get_mirror_data(&tx, "ext-id"),
+        DbData::Data(data.to_string())
+    );
+    let guid = get_mirror_guid(&tx, "ext-id")?;
+    // Incoming payload with the same data.
+    // This could happen if, for example, another client changed the
+    // key and then put it back the way it was.
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid),
+        ext_id: "ext-id".to_string(),
+        data: Some(data.to_string()),
+    })?;
+    // Should be no outgoing records as we reconciled.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
+    check_finished_with(&tx, "ext-id", data)?;
+    Ok(())
+}
+
+#[test]
+fn test_accept_incoming_when_local_is_deleted() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    // We only record an extension as deleted locally if it has been
+    // uploaded before being deleted.
+    let data = json!({"key1": "key1-value"});
+    set(&tx, "ext-id", data)?;
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    let guid = get_mirror_guid(&tx, "ext-id")?;
+    clear(&tx, "ext-id")?;
+    // Incoming payload without 'key1'. Because we previously uploaded
+    // key1, this means another client deleted it.
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid),
+        ext_id: "ext-id".to_string(),
+        data: Some(json!({"key2": "key2-value"}).to_string()),
+    })?;
+    // We completely accept the incoming record.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
+    check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
+    Ok(())
+}
+
+#[test]
+fn test_accept_incoming_when_local_is_deleted_no_mirror() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    let data = json!({"key1": "key1-value"});
+    set(&tx, "ext-id", data)?;
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    clear(&tx, "ext-id")?;
+    let payload = Payload::from_record(Record {
+        // Use a guid that isn't in the mirror, so that we don't find
+        // the mirrored data. This test is somewhat artificial, since
+        // deduping might obviate the need for it.
+        guid: Guid::from("guid"),
+        ext_id: "ext-id".to_string(),
+        data: Some(json!({"key2": "key2-value"}).to_string()),
+    })?;
+    // We completely accept the incoming record.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
+    check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
+    Ok(())
+}
+
+#[test]
+fn test_accept_deleted_key_mirrored() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    let data = json!({"key1": "key1-value", "key2": "key2-value"});
+    set(&tx, "ext-id", data)?;
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    let guid = get_mirror_guid(&tx, "ext-id")?;
+    // Incoming payload without 'key1'. Because we previously uploaded
+    // key1, this means another client deleted it.
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid),
+        ext_id: "ext-id".to_string(),
+        data: Some(json!({"key2": "key2-value"}).to_string()),
+    })?;
+    // We completely accept the incoming record.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
+    check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
+    Ok(())
+}
+
+#[test]
+fn test_merged_no_mirror() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    let data = json!({"key1": "key1-value"});
+    set(&tx, "ext-id", data)?;
+    // Incoming payload without 'key1' and some data for 'key2'.
+    // Because we never uploaded 'key1', we merge our local values
+    // with the remote.
     let payload = Payload::from_record(Record {
         guid: Guid::from("guid"),
         ext_id: "ext-id".to_string(),
         data: Some(json!({"key2": "key2-value"}).to_string()),
     })?;
     assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
     check_finished_with(
         &tx,
         "ext-id",
         json!({"key1": "key1-value", "key2": "key2-value"}),
     )?;
     Ok(())
 }
 
 #[test]
-fn test_reconciled() -> Result<()> {
+fn test_merged_incoming() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    let old_data = json!({"key1": "key1-value", "key2": "key2-value", "doomed_key": "deletable"});
+    set(&tx, "ext-id", old_data)?;
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    let guid = get_mirror_guid(&tx, "ext-id")?;
+    // We update 'key1' locally.
+    let local_data = json!({"key1": "key1-new", "key2": "key2-value", "doomed_key": "deletable"});
+    set(&tx, "ext-id", local_data)?;
+    // Incoming payload where another client set 'key2' and removed
+    // the 'doomed_key'.
+    // Because we never uploaded our local change to 'key1', we merge
+    // it in, but otherwise accept the server's changes.
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid),
+        ext_id: "ext-id".to_string(),
+        data: Some(json!({"key1": "key1-value", "key2": "key2-incoming"}).to_string()),
+    })?;
+    // We should send our updated 'key1'.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
+    check_finished_with(
+        &tx,
+        "ext-id",
+        json!({"key1": "key1-new", "key2": "key2-incoming"}),
+    )?;
+    Ok(())
+}
+
+#[test]
+fn test_merged_with_null_payload() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    let old_data = json!({"key1": "key1-value"});
+    set(&tx, "ext-id", old_data.clone())?;
+    // Push this change remotely.
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    assert_eq!(
+        get_mirror_data(&tx, "ext-id"),
+        DbData::Data(old_data.to_string())
+    );
+    let guid = get_mirror_guid(&tx, "ext-id")?;
+    let local_data = json!({"key1": "key1-new", "key2": "key2-value"});
+    set(&tx, "ext-id", local_data.clone())?;
+    // Incoming payload with the same old data.
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid),
+        ext_id: "ext-id".to_string(),
+        data: Some(old_data.to_string()),
+    })?;
+    // Three-way-merge will not detect any change in key1, so we
+    // should keep our entire new value.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
+    check_finished_with(&tx, "ext-id", local_data)?;
+    Ok(())
+}
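+
+// (In merge() terms, the test above has other == parent == old_data,
+// so no key changed remotely; every local change is kept and the
+// result is Merge { data: local_data }, which we then upload.)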
+
+#[test]
+fn test_deleted_mirrored_object_accept() -> Result<()> {
     let mut db = new_syncable_mem_db();
     let tx = db.transaction()?;
-    let data = json!({"key1": "key1-value"});
+    let data = json!({"key1": "key1-value", "key2": "key2-value"});
     set(&tx, "ext-id", data)?;
-    // Incoming payload without 'key1' and conflicting for 'key2'
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    let guid = get_mirror_guid(&tx, "ext-id")?;
+    // Incoming payload with data deleted.
+    // We synchronize this deletion by deleting the keys we think
+    // were on the server.
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid),
+        ext_id: "ext-id".to_string(),
+        data: None,
+    })?;
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
+    assert_eq!(get_local_data(&tx, "ext-id"), DbData::NullRow);
+    assert_eq!(get_mirror_data(&tx, "ext-id"), DbData::NullRow);
+    Ok(())
+}
+
+#[test]
+fn test_deleted_mirrored_object_merged() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    set(&tx, "ext-id", json!({"key1": "key1-value"}))?;
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    let guid = get_mirror_guid(&tx, "ext-id")?;
+    set(
+        &tx,
+        "ext-id",
+        json!({"key1": "key1-new", "key2": "key2-value"}),
+    )?;
+    // Incoming payload with data deleted.
+    // We synchronize this deletion by deleting the keys we think
+    // were on the server.
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid),
+        ext_id: "ext-id".to_string(),
+        data: None,
+    })?;
+    // This overrides the change to 'key1', but we still upload 'key2'.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
+    check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
+    Ok(())
+}
+
+/// Like the above test, but with a mirrored tombstone.
+#[test]
+fn test_deleted_mirrored_tombstone_merged() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    // Sync some data so we can get the guid for this extension.
+    set(&tx, "ext-id", json!({"key1": "key1-value"}))?;
+    assert_eq!(do_sync(&tx, vec![])?.len(), 1);
+    let guid = get_mirror_guid(&tx, "ext-id")?;
+    // Sync a delete for this data so we have a tombstone in the mirror.
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid.clone()),
+        ext_id: "ext-id".to_string(),
+        data: None,
+    })?;
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
+    assert_eq!(get_mirror_data(&tx, "ext-id"), DbData::NullRow);
+
+    // Set some data and sync it simultaneously with another incoming delete.
+    set(&tx, "ext-id", json!({"key2": "key2-value"}))?;
+    let payload = Payload::from_record(Record {
+        guid: Guid::from(guid),
+        ext_id: "ext-id".to_string(),
+        data: None,
+    })?;
+    // The mirrored tombstone has no keys, so there is nothing to
+    // delete locally. Instead we push our data.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
+    check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
+    Ok(())
+}
+
+#[test]
+fn test_deleted_not_mirrored_object_merged() -> Result<()> {
+    let mut db = new_syncable_mem_db();
+    let tx = db.transaction()?;
+    let data = json!({"key1": "key1-value", "key2": "key2-value"});
+    set(&tx, "ext-id", data)?;
+    // Incoming payload with data deleted.
     let payload = Payload::from_record(Record {
         guid: Guid::from("guid"),
         ext_id: "ext-id".to_string(),
-        data: Some(json!({"key1": "key1-value"}).to_string()),
+        data: None,
     })?;
-    // Should be no outgoing records as we reconciled.
-    assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
-    check_finished_with(&tx, "ext-id", json!({"key1": "key1-value"}))?;
+    // We normally delete the keys we think were on the server, but
+    // here we have no record of what was on the server, so we keep
+    // (and upload) all of our local keys.
+    assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
+    check_finished_with(
+        &tx,
+        "ext-id",
+        json!({"key1": "key1-value", "key2": "key2-value"}),
+    )?;
     Ok(())
 }
 
 #[test]
 fn test_conflicting_incoming() -> Result<()> {
     let mut db = new_syncable_mem_db();
     let tx = db.transaction()?;
     let data = json!({"key1": "key1-value", "key2": "key2-value"});
     set(&tx, "ext-id", data)?;
-    // Incoming payload without 'key1' and conflicting for 'key2'
+    // Incoming payload without 'key1' and conflicting for 'key2'.
+    // Because we never uploaded either of our keys, we'll merge our
+    // 'key1' in, but the server's 'key2' wins.
     let payload = Payload::from_record(Record {
         guid: Guid::from("guid"),
         ext_id: "ext-id".to_string(),
         data: Some(json!({"key2": "key2-incoming"}).to_string()),
     })?;
+    // We should send our 'key1'.
     assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
     check_finished_with(
         &tx,
         "ext-id",
         json!({"key1": "key1-value", "key2": "key2-incoming"}),
     )?;
     Ok(())
 }
-
-// There are lots more we could add here, particularly around the resolution of
-// deletion of keys and deletions of the entire value.
--- a/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml
+++ b/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml
@@ -13,10 +13,10 @@ moz_task = { path = "../../../../../xpco
 nserror = { path = "../../../../../xpcom/rust/nserror" }
 nsstring = { path = "../../../../../xpcom/rust/nsstring" }
 once_cell = "1"
 thin-vec = { version = "0.1.0", features = ["gecko-ffi"] }
 xpcom = { path = "../../../../../xpcom/rust/xpcom" }
 serde = "1"
 serde_json = "1"
 storage_variant = { path = "../../../../../storage/variant" }
-sql-support = { git = "https://github.com/mozilla/application-services", rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8" }
-webext-storage = { git = "https://github.com/mozilla/application-services", rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8" }
+sql-support = { git = "https://github.com/mozilla/application-services", rev = "7352f64601cdbf39d28831d30d246f7340ef31b3" }
+webext-storage = { git = "https://github.com/mozilla/application-services", rev = "7352f64601cdbf39d28831d30d246f7340ef31b3" }