Bug 1594995 - Part 3: Vendor newly added rust dependencies, r=keeler
authorVictor Porof <vporof@mozilla.com>
Wed, 13 Nov 2019 11:52:46 +0000
changeset 501779 72a83016b34011cbedc5e576ea70e058361ee69c
parent 501778 fc5e72bd0abfdfe155acad1f474f6b6651819ad6
child 501780 775a103cdc2c26daa4421a6efabeb94efa654b48
push id114172
push userdluca@mozilla.com
push dateTue, 19 Nov 2019 11:31:10 +0000
treeherdermozilla-inbound@b5c5ba07d3db [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerskeeler
bugs1594995
milestone72.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1594995 - Part 3: Vendor newly added rust dependencies, r=keeler Differential Revision: https://phabricator.services.mozilla.com/D52318
.cargo/config.in
Cargo.lock
third_party/rust/id-arena/.cargo-checksum.json
third_party/rust/id-arena/CHANGELOG.md
third_party/rust/id-arena/Cargo.toml
third_party/rust/id-arena/LICENSE-APACHE
third_party/rust/id-arena/LICENSE-MIT
third_party/rust/id-arena/README.md
third_party/rust/id-arena/README.tpl
third_party/rust/id-arena/src/lib.rs
third_party/rust/id-arena/src/rayon.rs
third_party/rust/id-arena/tests/readme_up_to_date.rs
third_party/rust/rkv-0.10.2/.cargo-checksum.json
third_party/rust/rkv-0.10.2/CODE_OF_CONDUCT.md
third_party/rust/rkv-0.10.2/Cargo.lock
third_party/rust/rkv-0.10.2/Cargo.toml
third_party/rust/rkv-0.10.2/LICENSE
third_party/rust/rkv-0.10.2/README.md
third_party/rust/rkv-0.10.2/examples/README.md
third_party/rust/rkv-0.10.2/examples/iterator.rs
third_party/rust/rkv-0.10.2/examples/simple-store.rs
third_party/rust/rkv-0.10.2/run-all-examples.sh
third_party/rust/rkv-0.10.2/src/bin/dump.rs
third_party/rust/rkv-0.10.2/src/bin/rand.rs
third_party/rust/rkv-0.10.2/src/env.rs
third_party/rust/rkv-0.10.2/src/error.rs
third_party/rust/rkv-0.10.2/src/lib.rs
third_party/rust/rkv-0.10.2/src/manager.rs
third_party/rust/rkv-0.10.2/src/migrate.rs
third_party/rust/rkv-0.10.2/src/readwrite.rs
third_party/rust/rkv-0.10.2/src/store.rs
third_party/rust/rkv-0.10.2/src/store/integer.rs
third_party/rust/rkv-0.10.2/src/store/integermulti.rs
third_party/rust/rkv-0.10.2/src/store/multi.rs
third_party/rust/rkv-0.10.2/src/store/single.rs
third_party/rust/rkv-0.10.2/src/value.rs
third_party/rust/rkv-0.10.2/tests/integer-store.rs
third_party/rust/rkv-0.10.2/tests/manager.rs
third_party/rust/rkv-0.10.2/tests/multi-integer-store.rs
third_party/rust/rkv-0.10.2/tests/test_txn.rs
third_party/rust/rkv/.appveyor.yml
third_party/rust/rkv/.cargo-checksum.json
third_party/rust/rkv/.rustfmt.toml
third_party/rust/rkv/.travis.yml
third_party/rust/rkv/Cargo.lock
third_party/rust/rkv/Cargo.toml
third_party/rust/rkv/examples/iterator.rs
third_party/rust/rkv/examples/simple-store.rs
third_party/rust/rkv/src/backend.rs
third_party/rust/rkv/src/backend/common.rs
third_party/rust/rkv/src/backend/impl_lmdb.rs
third_party/rust/rkv/src/backend/impl_lmdb/cursor.rs
third_party/rust/rkv/src/backend/impl_lmdb/database.rs
third_party/rust/rkv/src/backend/impl_lmdb/environment.rs
third_party/rust/rkv/src/backend/impl_lmdb/error.rs
third_party/rust/rkv/src/backend/impl_lmdb/flags.rs
third_party/rust/rkv/src/backend/impl_lmdb/info.rs
third_party/rust/rkv/src/backend/impl_lmdb/iter.rs
third_party/rust/rkv/src/backend/impl_lmdb/stat.rs
third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs
third_party/rust/rkv/src/backend/impl_safe.rs
third_party/rust/rkv/src/backend/impl_safe/cursor.rs
third_party/rust/rkv/src/backend/impl_safe/database.rs
third_party/rust/rkv/src/backend/impl_safe/environment.rs
third_party/rust/rkv/src/backend/impl_safe/error.rs
third_party/rust/rkv/src/backend/impl_safe/flags.rs
third_party/rust/rkv/src/backend/impl_safe/info.rs
third_party/rust/rkv/src/backend/impl_safe/iter.rs
third_party/rust/rkv/src/backend/impl_safe/snapshot.rs
third_party/rust/rkv/src/backend/impl_safe/stat.rs
third_party/rust/rkv/src/backend/impl_safe/transaction.rs
third_party/rust/rkv/src/backend/traits.rs
third_party/rust/rkv/src/bin/dump.rs
third_party/rust/rkv/src/bin/rand.rs
third_party/rust/rkv/src/env.rs
third_party/rust/rkv/src/error.rs
third_party/rust/rkv/src/helpers.rs
third_party/rust/rkv/src/lib.rs
third_party/rust/rkv/src/manager.rs
third_party/rust/rkv/src/readwrite.rs
third_party/rust/rkv/src/store.rs
third_party/rust/rkv/src/store/integer.rs
third_party/rust/rkv/src/store/integermulti.rs
third_party/rust/rkv/src/store/keys.rs
third_party/rust/rkv/src/store/keys/encodables.rs
third_party/rust/rkv/src/store/keys/primitives.rs
third_party/rust/rkv/src/store/multi.rs
third_party/rust/rkv/src/store/single.rs
third_party/rust/rkv/src/value.rs
third_party/rust/rkv/tests/integer-store.rs
third_party/rust/rkv/tests/manager.rs
third_party/rust/rkv/tests/multi-integer-store.rs
third_party/rust/rkv/tests/test_txn.rs
--- a/.cargo/config.in
+++ b/.cargo/config.in
@@ -2,16 +2,21 @@
 # It was generated by `mach vendor rust`.
 # Please do not edit.
 
 [source."https://github.com/shravanrn/nix/"]
 branch = "r0.13.1"
 git = "https://github.com/shravanrn/nix/"
 replace-with = "vendored-sources"
 
+[source."https://github.com/mozilla/rkv"]
+branch = "safe-mode"
+git = "https://github.com/mozilla/rkv"
+replace-with = "vendored-sources"
+
 [source."https://github.com/mozilla/neqo"]
 git = "https://github.com/mozilla/neqo"
 replace-with = "vendored-sources"
 rev = "a17c1e83"
 
 [source."https://github.com/hsivonen/packed_simd"]
 branch = "rust_1_32"
 git = "https://github.com/hsivonen/packed_simd"
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -416,22 +416,21 @@ source = "registry+https://github.com/ru
 
 [[package]]
 name = "cert_storage"
 version = "0.0.1"
 dependencies = [
  "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "lmdb-rkv 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "moz_task 0.1.0",
  "nserror 0.1.0",
  "nsstring 0.1.0",
- "rkv 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rkv 0.11.0 (git+https://github.com/mozilla/rkv?branch=safe-mode)",
  "rust_cascade 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "storage_variant 0.1.0",
  "tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "thin-vec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
  "xpcom 0.1.0",
 ]
@@ -1436,16 +1435,21 @@ dependencies = [
  "tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-tcp 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-threadpool 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-timer 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "want 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "id-arena"
+version = "2.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "ident_case"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "idna"
 version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2799,16 +2803,37 @@ dependencies = [
  "ordered-float 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
  "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "rkv"
+version = "0.11.0"
+source = "git+https://github.com/mozilla/rkv?branch=safe-mode#6a866fdad2ca880df9b87fcbc9921abac1e91914"
+dependencies = [
+ "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bincode 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "id-arena 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lmdb-rkv 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ordered-float 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
+ "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "rlbox_lucet_sandbox"
 version = "0.1.0"
 source = "git+https://github.com/PLSysSec/rlbox_lucet_sandbox/?rev=997c648eb0eaeaaa7a00a9eee20431f750b4e190#997c648eb0eaeaaa7a00a9eee20431f750b4e190"
 dependencies = [
  "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "goblin 0.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
  "lucet-module 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)",
  "lucet-runtime 0.1.1 (git+https://github.com/PLSysSec/lucet_sandbox_compiler)",
@@ -4221,16 +4246,17 @@ dependencies = [
 "checksum h2 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "a27e7ed946e8335bdf9a191bc1b9b14a03ba822d013d2f58437f4fabcbd7fc2c"
 "checksum headers 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc6e2e51d356081258ef05ff4c648138b5d3fe64b7300aaad3b820554a2b7fb6"
 "checksum headers-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "967131279aaa9f7c20c7205b45a391638a83ab118e6509b2d0ccbe08de044237"
 "checksum headers-derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f33cf300c485e3cbcba0235013fcc768723451c9b84d1b31aa7fec0491ac9a11"
 "checksum http 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "eed324f0f0daf6ec10c474f150505af2c143f251722bf9dbd1261bd1f2ee2c1a"
 "checksum httparse 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e8734b0cfd3bc3e101ec59100e101c2eecd19282202e87808b3037b442777a83"
 "checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e"
 "checksum hyper 0.12.19 (registry+https://github.com/rust-lang/crates.io-index)" = "f1ebec079129e43af5e234ef36ee3d7e6085687d145b7ea653b262d16c6b65f1"
+"checksum id-arena 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005"
 "checksum ident_case 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c9826188e666f2ed92071d2dadef6edc430b11b158b5b2b3f4babbcc891eaaa"
 "checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9"
 "checksum image 0.22.1 (registry+https://github.com/rust-lang/crates.io-index)" = "663a975007e0b49903e2e8ac0db2c432c465855f2d65f17883ba1476e85f0b42"
 "checksum indexmap 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a4d6d89e0948bf10c08b9ecc8ac5b83f07f857ebe2c0cbe38de15b4e4f510356"
 "checksum inflate 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1cdb29978cc5797bd8dcc8e5bf7de604891df2a8dc576973d71a281e916db2ff"
 "checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
 "checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358"
 "checksum itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c069bbec61e1ca5a596166e55dfe4773ff745c3d16b700013bcaff9a6df2c682"
@@ -4339,16 +4365,17 @@ dependencies = [
 "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
 "checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
 "checksum redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecedbca3bf205f8d8f5c2b44d83cd0690e39ee84b951ed649e9f1841132b66d"
 "checksum regex 1.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d9d8297cc20bbb6184f8b45ff61c8ee6a9ac56c156cec8e38c3e5084773c44ad"
 "checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716"
 "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
 "checksum ringbuf 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "588456c74d5ff0a5806bc084818e043e767533f743c11ee6f3ccf298599c6847"
 "checksum rkv 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9aab7c645d32e977e186448b0a5c2c3139a91a7f630cfd8a8c314d1d145e78bf"
+"checksum rkv 0.11.0 (git+https://github.com/mozilla/rkv?branch=safe-mode)" = "<none>"
 "checksum rlbox_lucet_sandbox 0.1.0 (git+https://github.com/PLSysSec/rlbox_lucet_sandbox/?rev=997c648eb0eaeaaa7a00a9eee20431f750b4e190)" = "<none>"
 "checksum ron 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "da06feaa07f69125ab9ddc769b11de29090122170b402547f64b86fe16ebc399"
 "checksum runloop 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d79b4b604167921892e84afbbaad9d5ad74e091bf6c511d9dbfb0593f09fabd"
 "checksum rust-argon2 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ca4eaef519b494d1f2848fc602d18816fed808a981aedf4f1f00ceb7c9d32cf"
 "checksum rust-ini 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8a654c5bda722c699be6b0fe4c0d90de218928da5b724c3e467fc48865c37263"
 "checksum rust_cascade 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f3fe4900d38dab1ad21a515e44687dd0711e6b0ec5b214a3b1aa8857343bcf3a"
 "checksum rustc-demangle 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "76d7ba1feafada44f2d38eed812bd2489a03c0f5abb975799251518b68848649"
 "checksum rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8"
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{"CHANGELOG.md":"44d2bc9ae9829b9d80bbd64cf758a29c5b7a136e8049bde601f25b504ff5daf8","Cargo.toml":"68ffe09814502adc81ab77dacc5d76e5f439435a71b48ec9c575289193945cb7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"74619b782c5085d5e12762a2a209555e90770d0e08048d95a31f95febac0b4c6","README.tpl":"ec385000e14590a306855e7893daed0168102f33166bdc1e5cf5fa5599dac03f","src/lib.rs":"ee705a8a93ccfa0f958e421a1e27440e5b92afd422ee6579f66282287cb9abe8","src/rayon.rs":"48807a5563e6c248bab2731b60b00148084db9c071cf3c47cdb12dc7ecfa84e0","tests/readme_up_to_date.rs":"8db3e41d803e2a10307e7e35cb2afa6733e1c39ad34789752a927564bf6795b6"},"package":"25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/CHANGELOG.md
@@ -0,0 +1,65 @@
+# 2.2.1
+
+Released 2019-02-15.
+
+* Make sure our rayon parallel iterators are exported. Previously instances of
+  them were returned by `pub` methods but the types themselves were not
+  exported.
+
+# 2.2.0
+
+Released 2019-01-30.
+
+* Add the `Arena::alloc_with_id` method. This is better than using
+  `Arena::next_id` directly most of the time (but is also not *quite* as
+  flexible). See [#9](https://github.com/fitzgen/id-arena/issues/9) and
+  [#10](https://github.com/fitzgen/id-arena/pull/10).
+
+--------------------------------------------------------------------------------
+
+# 2.1.0
+
+Released 2019-01-25.
+
+* Added optional support for `rayon` parallel iteration. Enable the `rayon`
+  Cargo feature to get access.
+
+--------------------------------------------------------------------------------
+
+# 2.0.1
+
+Released 2019-01-09.
+
+* Implemented `Ord` and `PartialOrd` for `Id<T>`.
+* Added an `Arena::with_capacity` constructor.
+* Added `Arena::next_id` to get the id that will be used for the next
+  allocation.
+
+--------------------------------------------------------------------------------
+
+# 2.0.0
+
+Released 2018-11-28.
+
+* Introduces the `ArenaBehavior` trait, which allows one to customize identifier
+  types and do things like implement space optimizations or use identifiers for
+  many arenas at once.
+* Implements `Clone`, `PartialEq` and `Eq` for arenas.
+
+--------------------------------------------------------------------------------
+
+# 1.0.2
+
+Released 2018-11-25.
+
+* `Id<T>` now implements `Send` and `Sync`
+* The `PartialEq` implementation for `Id<T>` now correctly checks that two ids
+  are for the same arena when checking equality.
+
+--------------------------------------------------------------------------------
+
+# 1.0.1
+
+--------------------------------------------------------------------------------
+
+# 1.0.0
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/Cargo.toml
@@ -0,0 +1,31 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "id-arena"
+version = "2.2.1"
+authors = ["Nick Fitzgerald <fitzgen@gmail.com>", "Aleksey Kladov <aleksey.kladov@gmail.com>"]
+description = "A simple, id-based arena."
+documentation = "https://docs.rs/id-arena"
+readme = "README.md"
+categories = ["memory-management", "rust-patterns", "no-std"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/fitzgen/id-arena"
+[package.metadata.docs.rs]
+features = ["rayon"]
+[dependencies.rayon]
+version = "1.0.3"
+optional = true
+
+[features]
+default = ["std"]
+std = []
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 Alex Crichton
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/README.md
@@ -0,0 +1,100 @@
+# `id-arena`
+
+[![](https://img.shields.io/crates/v/id-arena.svg)](https://crates.io/crates/id-arena)
+[![](https://img.shields.io/crates/d/id-arena.svg)](https://crates.io/crates/id-arena)
+[![Travis CI Build Status](https://travis-ci.org/fitzgen/id-arena.svg?branch=master)](https://travis-ci.org/fitzgen/id-arena)
+
+A simple, id-based arena.
+
+### Id-based
+
+Allocate objects and get an identifier for that object back, *not* a
+reference to the allocated object. Given an id, you can get a shared or
+exclusive reference to the allocated object from the arena. This id-based
+approach is useful for constructing mutable graph data structures.
+
+If you want allocation to return a reference, consider [the `typed-arena`
+crate](https://github.com/SimonSapin/rust-typed-arena/) instead.
+
+### No Deletion
+
+This arena does not support deletion, which makes its implementation simple
+and allocation fast. If you want deletion, you need a way to solve the ABA
+problem. Consider using [the `generational-arena`
+crate](https://github.com/fitzgen/generational-arena) instead.
+
+### Homogeneous
+
+This crate's arenas can only contain objects of a single type `T`. If you
+need an arena of objects with heterogeneous types, consider another crate.
+
+### `#![no_std]` Support
+
+Requires the `alloc` nightly feature. Disable the on-by-default `"std"` feature:
+
+```toml
+[dependencies.id-arena]
+version = "2"
+default-features = false
+```
+
+### `rayon` Support
+
+If the `rayon` feature of this crate is activated:
+
+```toml
+[dependencies]
+id-arena = { version = "2", features = ["rayon"] }
+```
+
+then you can use [`rayon`](https://crates.io/crates/rayon)'s support for
+parallel iteration. The `Arena` type will have a `par_iter` family of
+methods where appropriate.
+
+### Example
+
+```rust
+use id_arena::{Arena, Id};
+
+type AstNodeId = Id<AstNode>;
+
+#[derive(Debug, Eq, PartialEq)]
+pub enum AstNode {
+    Const(i64),
+    Var(String),
+    Add {
+        lhs: AstNodeId,
+        rhs: AstNodeId,
+    },
+    Sub {
+        lhs: AstNodeId,
+        rhs: AstNodeId,
+    },
+    Mul {
+        lhs: AstNodeId,
+        rhs: AstNodeId,
+    },
+    Div {
+        lhs: AstNodeId,
+        rhs: AstNodeId,
+    },
+}
+
+let mut ast_nodes = Arena::<AstNode>::new();
+
+// Create the AST for `a * (b + 3)`.
+let three = ast_nodes.alloc(AstNode::Const(3));
+let b = ast_nodes.alloc(AstNode::Var("b".into()));
+let b_plus_three = ast_nodes.alloc(AstNode::Add {
+    lhs: b,
+    rhs: three,
+});
+let a = ast_nodes.alloc(AstNode::Var("a".into()));
+let a_times_b_plus_three = ast_nodes.alloc(AstNode::Mul {
+    lhs: a,
+    rhs: b_plus_three,
+});
+
+// Can use indexing to access allocated nodes.
+assert_eq!(ast_nodes[three], AstNode::Const(3));
+```
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/README.tpl
@@ -0,0 +1,3 @@
+# `{{crate}}`
+
+{{readme}}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/src/lib.rs
@@ -0,0 +1,726 @@
+//! [![](https://img.shields.io/crates/v/id-arena.svg)](https://crates.io/crates/id-arena)
+//! [![](https://img.shields.io/crates/d/id-arena.svg)](https://crates.io/crates/id-arena)
+//! [![Travis CI Build Status](https://travis-ci.org/fitzgen/id-arena.svg?branch=master)](https://travis-ci.org/fitzgen/id-arena)
+//!
+//! A simple, id-based arena.
+//!
+//! ## Id-based
+//!
+//! Allocate objects and get an identifier for that object back, *not* a
+//! reference to the allocated object. Given an id, you can get a shared or
+//! exclusive reference to the allocated object from the arena. This id-based
+//! approach is useful for constructing mutable graph data structures.
+//!
+//! If you want allocation to return a reference, consider [the `typed-arena`
+//! crate](https://github.com/SimonSapin/rust-typed-arena/) instead.
+//!
+//! ## No Deletion
+//!
+//! This arena does not support deletion, which makes its implementation simple
+//! and allocation fast. If you want deletion, you need a way to solve the ABA
+//! problem. Consider using [the `generational-arena`
+//! crate](https://github.com/fitzgen/generational-arena) instead.
+//!
+//! ## Homogeneous
+//!
+//! This crate's arenas can only contain objects of a single type `T`. If you
+//! need an arena of objects with heterogeneous types, consider another crate.
+//!
+//! ## `#![no_std]` Support
+//!
+//! Requires the `alloc` nightly feature. Disable the on-by-default `"std"` feature:
+//!
+//! ```toml
+//! [dependencies.id-arena]
+//! version = "2"
+//! default-features = false
+//! ```
+//!
+//! ## `rayon` Support
+//!
+//! If the `rayon` feature of this crate is activated:
+//!
+//! ```toml
+//! [dependencies]
+//! id-arena = { version = "2", features = ["rayon"] }
+//! ```
+//!
+//! then you can use [`rayon`](https://crates.io/crates/rayon)'s support for
+//! parallel iteration. The `Arena` type will have a `par_iter` family of
+//! methods where appropriate.
+//!
+//! ## Example
+//!
+//! ```rust
+//! use id_arena::{Arena, Id};
+//!
+//! type AstNodeId = Id<AstNode>;
+//!
+//! #[derive(Debug, Eq, PartialEq)]
+//! pub enum AstNode {
+//!     Const(i64),
+//!     Var(String),
+//!     Add {
+//!         lhs: AstNodeId,
+//!         rhs: AstNodeId,
+//!     },
+//!     Sub {
+//!         lhs: AstNodeId,
+//!         rhs: AstNodeId,
+//!     },
+//!     Mul {
+//!         lhs: AstNodeId,
+//!         rhs: AstNodeId,
+//!     },
+//!     Div {
+//!         lhs: AstNodeId,
+//!         rhs: AstNodeId,
+//!     },
+//! }
+//!
+//! let mut ast_nodes = Arena::<AstNode>::new();
+//!
+//! // Create the AST for `a * (b + 3)`.
+//! let three = ast_nodes.alloc(AstNode::Const(3));
+//! let b = ast_nodes.alloc(AstNode::Var("b".into()));
+//! let b_plus_three = ast_nodes.alloc(AstNode::Add {
+//!     lhs: b,
+//!     rhs: three,
+//! });
+//! let a = ast_nodes.alloc(AstNode::Var("a".into()));
+//! let a_times_b_plus_three = ast_nodes.alloc(AstNode::Mul {
+//!     lhs: a,
+//!     rhs: b_plus_three,
+//! });
+//!
+//! // Can use indexing to access allocated nodes.
+//! assert_eq!(ast_nodes[three], AstNode::Const(3));
+//! ```
+
+#![forbid(unsafe_code)]
+#![deny(missing_debug_implementations)]
+#![deny(missing_docs)]
+// In no-std mode, use the alloc crate to get `Vec`.
+#![no_std]
+#![cfg_attr(not(feature = "std"), feature(alloc))]
+
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::iter;
+use core::marker::PhantomData;
+use core::ops;
+use core::slice;
+use core::sync::atomic::{self, AtomicUsize, ATOMIC_USIZE_INIT};
+
+#[cfg(not(feature = "std"))]
+extern crate alloc;
+#[cfg(not(feature = "std"))]
+use alloc::vec::{self, Vec};
+
+#[cfg(feature = "std")]
+extern crate std;
+#[cfg(feature = "std")]
+use std::vec::{self, Vec};
+
+#[cfg(feature = "rayon")]
+mod rayon;
+#[cfg(feature = "rayon")]
+pub use rayon::*;
+
+/// A trait representing the implementation behavior of an arena and how
+/// identifiers are represented.
+///
+/// ## When should I implement `ArenaBehavior` myself?
+///
+/// Usually, you should just use `DefaultArenaBehavior`, which is simple and
+/// correct. However, there are some scenarios where you might want to implement
+/// `ArenaBehavior` yourself:
+///
+/// * **Space optimizations:** The default identifier is two words in size,
+/// which is larger than is usually necessary. For example, if you know that an
+/// arena *cannot* contain more than 256 items, you could make your own
+/// identifier type that stores the index as a `u8` and then you can save some
+/// space.
+///
+/// * **Trait Coherence:** If you need to implement an upstream crate's traits
+/// for identifiers, then defining your own identifier type allows you to work
+/// with trait coherence rules.
+///
+/// * **Share identifiers across arenas:** You can coordinate and share
+/// identifiers across different arenas to enable a "struct of arrays" style
+/// data representation.
+pub trait ArenaBehavior {
+    /// The identifier type.
+    type Id: Copy;
+
+    /// Construct a new object identifier from the given index and arena
+    /// identifier.
+    ///
+    /// ## Panics
+    ///
+    /// Implementations are allowed to panic if the given index is larger than
+    /// the underlying storage (e.g. the implementation uses a `u8` for storing
+    /// indices and the given index value is larger than 255).
+    fn new_id(arena_id: u32, index: usize) -> Self::Id;
+
+    /// Get the given identifier's index.
+    fn index(Self::Id) -> usize;
+
+    /// Get the given identifier's arena id.
+    fn arena_id(Self::Id) -> u32;
+
+    /// Construct a new arena identifier.
+    ///
+    /// This is used to disambiguate `Id`s across different arenas. To make
+    /// identifiers with the same index from different arenas compare false for
+    /// equality, return a unique `u32` on every invocation. This is the
+    /// default, provided implementation's behavior.
+    ///
+    /// To make identifiers with the same index from different arenas compare
+    /// true for equality, return the same `u32` on every invocation.
+    fn new_arena_id() -> u32 {
+        static ARENA_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
+        ARENA_COUNTER.fetch_add(1, atomic::Ordering::SeqCst) as u32
+    }
+}
+
+/// An identifier for an object allocated within an arena.
+pub struct Id<T> {
+    idx: usize,
+    arena_id: u32,
+    _ty: PhantomData<fn() -> T>,
+}
+
+impl<T> fmt::Debug for Id<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Id").field("idx", &self.idx).finish()
+    }
+}
+
+impl<T> Copy for Id<T> {}
+
+impl<T> Clone for Id<T> {
+    #[inline]
+    fn clone(&self) -> Id<T> {
+        *self
+    }
+}
+
+impl<T> PartialEq for Id<T> {
+    #[inline]
+    fn eq(&self, rhs: &Self) -> bool {
+        self.arena_id == rhs.arena_id && self.idx == rhs.idx
+    }
+}
+
+impl<T> Eq for Id<T> {}
+
+impl<T> Hash for Id<T> {
+    #[inline]
+    fn hash<H: Hasher>(&self, h: &mut H) {
+        self.arena_id.hash(h);
+        self.idx.hash(h);
+    }
+}
+
+impl<T> PartialOrd for Id<T> {
+    fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
+        Some(self.cmp(rhs))
+    }
+}
+
+impl<T> Ord for Id<T> {
+    fn cmp(&self, rhs: &Self) -> Ordering {
+        self.arena_id
+            .cmp(&rhs.arena_id)
+            .then(self.idx.cmp(&rhs.idx))
+    }
+}
+
+impl<T> Id<T> {
+    /// Get the index within the arena that this id refers to.
+    #[inline]
+    pub fn index(&self) -> usize {
+        self.idx
+    }
+}
+
+/// The default `ArenaBehavior` implementation.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct DefaultArenaBehavior<T> {
+    _phantom: PhantomData<fn() -> T>,
+}
+
+impl<T> ArenaBehavior for DefaultArenaBehavior<T> {
+    type Id = Id<T>;
+
+    #[inline]
+    fn new_id(arena_id: u32, idx: usize) -> Self::Id {
+        Id {
+            idx,
+            arena_id,
+            _ty: PhantomData,
+        }
+    }
+
+    #[inline]
+    fn index(id: Self::Id) -> usize {
+        id.idx
+    }
+
+    #[inline]
+    fn arena_id(id: Self::Id) -> u32 {
+        id.arena_id
+    }
+}
+
+/// An arena of objects of type `T`.
+///
+/// ```
+/// use id_arena::Arena;
+///
+/// let mut arena = Arena::<&str>::new();
+///
+/// let a = arena.alloc("Albert");
+/// assert_eq!(arena[a], "Albert");
+///
+/// arena[a] = "Alice";
+/// assert_eq!(arena[a], "Alice");
+/// ```
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Arena<T, A = DefaultArenaBehavior<T>> {
+    arena_id: u32,
+    items: Vec<T>,
+    _phantom: PhantomData<fn() -> A>,
+}
+
+impl<T, A> Default for Arena<T, A>
+where
+    A: ArenaBehavior,
+{
+    #[inline]
+    fn default() -> Arena<T, A> {
+        Arena {
+            arena_id: A::new_arena_id(),
+            items: Vec::new(),
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<T, A> Arena<T, A>
+where
+    A: ArenaBehavior,
+{
+    /// Construct a new, empty `Arena`.
+    ///
+    /// ```
+    /// use id_arena::Arena;
+    ///
+    /// let mut arena = Arena::<usize>::new();
+    /// arena.alloc(42);
+    /// ```
+    #[inline]
+    pub fn new() -> Arena<T, A> {
+        Default::default()
+    }
+
+    /// Construct a new, empty `Arena` with capacity for the given number of
+    /// elements.
+    ///
+    /// ```
+    /// use id_arena::Arena;
+    ///
+    /// let mut arena = Arena::<usize>::with_capacity(100);
+    /// for x in 0..100 {
+    ///     arena.alloc(x * x);
+    /// }
+    /// ```
+    #[inline]
+    pub fn with_capacity(capacity: usize) -> Arena<T, A> {
+        Arena {
+            arena_id: A::new_arena_id(),
+            items: Vec::with_capacity(capacity),
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Allocate `item` within this arena and return its id.
+    ///
+    /// ```
+    /// use id_arena::Arena;
+    ///
+    /// let mut arena = Arena::<usize>::new();
+    /// let _id = arena.alloc(42);
+    /// ```
+    ///
+    /// ## Panics
+    ///
+    /// Panics if the number of elements in the arena overflows a `usize` or
+    /// `Id`'s index storage representation.
+    #[inline]
+    pub fn alloc(&mut self, item: T) -> A::Id {
+        let id = self.next_id();
+        self.items.push(item);
+        id
+    }
+
+    /// Allocate an item with the id that it will be assigned.
+    ///
+    /// This is useful for structures that want to store their id as their own
+    /// member.
+    ///
+    /// ```
+    /// use id_arena::{Arena, Id};
+    ///
+    /// struct Cat {
+    ///     id: Id<Cat>,
+    /// }
+    ///
+    /// let mut arena = Arena::<Cat>::new();
+    ///
+    /// let kitty = arena.alloc_with_id(|id| Cat { id });
+    /// assert_eq!(arena[kitty].id, kitty);
+    /// ```
+    #[inline]
+    pub fn alloc_with_id(&mut self, f: impl FnOnce(A::Id) -> T) -> A::Id {
+        let id = self.next_id();
+        let val = f(id);
+        self.alloc(val)
+    }
+
+    /// Get the id that will be used for the next item allocated into this
+    /// arena.
+    ///
+    /// If you are allocating a `struct` that wants to have its id as a member
+    /// of itself, prefer the less error-prone `Arena::alloc_with_id` method.
+    #[inline]
+    pub fn next_id(&self) -> A::Id {
+        let arena_id = self.arena_id;
+        let idx = self.items.len();
+        A::new_id(arena_id, idx)
+    }
+
+    /// Get a shared reference to the object associated with the given `id` if
+    /// it exists.
+    ///
+    /// If there is no object associated with `id` (for example, it might
+    /// reference an object allocated within a different arena) then return
+    /// `None`.
+    ///
+    /// ```
+    /// use id_arena::Arena;
+    ///
+    /// let mut arena = Arena::<usize>::new();
+    /// let id = arena.alloc(42);
+    /// assert!(arena.get(id).is_some());
+    ///
+    /// let other_arena = Arena::<usize>::new();
+    /// assert!(other_arena.get(id).is_none());
+    /// ```
+    #[inline]
+    pub fn get(&self, id: A::Id) -> Option<&T> {
+        if A::arena_id(id) != self.arena_id {
+            None
+        } else {
+            self.items.get(A::index(id))
+        }
+    }
+
+    /// Get an exclusive reference to the object associated with the given `id`
+    /// if it exists.
+    ///
+    /// If there is no object associated with `id` (for example, it might
+    /// reference an object allocated within a different arena) then return
+    /// `None`.
+    ///
+    /// ```
+    /// use id_arena::Arena;
+    ///
+    /// let mut arena = Arena::<usize>::new();
+    /// let id = arena.alloc(42);
+    /// assert!(arena.get_mut(id).is_some());
+    ///
+    /// let mut other_arena = Arena::<usize>::new();
+    /// assert!(other_arena.get_mut(id).is_none());
+    /// ```
+    #[inline]
+    pub fn get_mut(&mut self, id: A::Id) -> Option<&mut T> {
+        if A::arena_id(id) != self.arena_id {
+            None
+        } else {
+            self.items.get_mut(A::index(id))
+        }
+    }
+
+    /// Iterate over this arena's items and their ids.
+    ///
+    /// ```
+    /// use id_arena::Arena;
+    ///
+    /// let mut arena = Arena::<&str>::new();
+    ///
+    /// arena.alloc("hello");
+    /// arena.alloc("hi");
+    /// arena.alloc("yo");
+    ///
+    /// for (id, s) in arena.iter() {
+    ///     assert_eq!(arena.get(id).unwrap(), s);
+    ///     println!("{:?} -> {}", id, s);
+    /// }
+    /// ```
+    #[inline]
+    pub fn iter(&self) -> Iter<T, A> {
+        IntoIterator::into_iter(self)
+    }
+
+    /// Iterate over this arena's items and their ids, allowing mutation of each
+    /// item.
+    #[inline]
+    pub fn iter_mut(&mut self) -> IterMut<T, A> {
+        IntoIterator::into_iter(self)
+    }
+
+    /// Get the number of objects allocated in this arena.
+    ///
+    /// ```
+    /// use id_arena::Arena;
+    ///
+    /// let mut arena = Arena::<&str>::new();
+    ///
+    /// arena.alloc("hello");
+    /// arena.alloc("hi");
+    ///
+    /// assert_eq!(arena.len(), 2);
+    /// ```
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.items.len()
+    }
+}
+
+impl<T, A> ops::Index<A::Id> for Arena<T, A>
+where
+    A: ArenaBehavior,
+{
+    type Output = T;
+
+    #[inline]
+    fn index(&self, id: A::Id) -> &T {
+        assert_eq!(self.arena_id, A::arena_id(id));
+        &self.items[A::index(id)]
+    }
+}
+
+impl<T, A> ops::IndexMut<A::Id> for Arena<T, A>
+where
+    A: ArenaBehavior,
+{
+    #[inline]
+    fn index_mut(&mut self, id: A::Id) -> &mut T {
+        assert_eq!(self.arena_id, A::arena_id(id));
+        &mut self.items[A::index(id)]
+    }
+}
+
+fn add_id<A, T>(item: Option<(usize, T)>, arena_id: u32) -> Option<(A::Id, T)>
+where
+    A: ArenaBehavior,
+{
+    item.map(|(idx, item)| (A::new_id(arena_id, idx), item))
+}
+
+/// An iterator over `(Id, &T)` pairs in an arena.
+///
+/// See [the `Arena::iter()` method](./struct.Arena.html#method.iter) for details.
+#[derive(Debug)]
+pub struct Iter<'a, T: 'a, A: 'a> {
+    arena_id: u32,
+    iter: iter::Enumerate<slice::Iter<'a, T>>,
+    _phantom: PhantomData<fn() -> A>,
+}
+
+impl<'a, T: 'a, A: 'a> Iterator for Iter<'a, T, A>
+where
+    A: ArenaBehavior,
+{
+    type Item = (A::Id, &'a T);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        add_id::<A, _>(self.iter.next(), self.arena_id)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+
+impl<'a, T: 'a, A: 'a> DoubleEndedIterator for Iter<'a, T, A>
+where
+    A: ArenaBehavior,
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        add_id::<A, _>(self.iter.next_back(), self.arena_id)
+    }
+}
+
+impl<'a, T: 'a, A: 'a> ExactSizeIterator for Iter<'a, T, A>
+where
+    A: ArenaBehavior,
+{
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a Arena<T, A>
+where
+    A: ArenaBehavior,
+{
+    type Item = (A::Id, &'a T);
+    type IntoIter = Iter<'a, T, A>;
+
+    #[inline]
+    fn into_iter(self) -> Iter<'a, T, A> {
+        Iter {
+            arena_id: self.arena_id,
+            iter: self.items.iter().enumerate(),
+            _phantom: PhantomData,
+        }
+    }
+}
+
+/// An iterator over `(Id, &mut T)` pairs in an arena.
+///
+/// See [the `Arena::iter_mut()` method](./struct.Arena.html#method.iter_mut)
+/// for details.
+#[derive(Debug)]
+pub struct IterMut<'a, T: 'a, A: 'a> {
+    arena_id: u32,
+    iter: iter::Enumerate<slice::IterMut<'a, T>>,
+    _phantom: PhantomData<fn() -> A>,
+}
+
+impl<'a, T: 'a, A: 'a> Iterator for IterMut<'a, T, A>
+where
+    A: ArenaBehavior,
+{
+    type Item = (A::Id, &'a mut T);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        add_id::<A, _>(self.iter.next(), self.arena_id)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+
+impl<'a, T: 'a, A: 'a> DoubleEndedIterator for IterMut<'a, T, A>
+where
+    A: ArenaBehavior,
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        add_id::<A, _>(self.iter.next_back(), self.arena_id)
+    }
+}
+
+impl<'a, T: 'a, A: 'a> ExactSizeIterator for IterMut<'a, T, A>
+where
+    A: ArenaBehavior,
+{
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a mut Arena<T, A>
+where
+    A: ArenaBehavior,
+{
+    type Item = (A::Id, &'a mut T);
+    type IntoIter = IterMut<'a, T, A>;
+
+    #[inline]
+    fn into_iter(self) -> IterMut<'a, T, A> {
+        IterMut {
+            arena_id: self.arena_id,
+            iter: self.items.iter_mut().enumerate(),
+            _phantom: PhantomData,
+        }
+    }
+}
+
+/// An iterator over `(Id, T)` pairs in an arena.
+#[derive(Debug)]
+pub struct IntoIter<T, A> {
+    arena_id: u32,
+    iter: iter::Enumerate<vec::IntoIter<T>>,
+    _phantom: PhantomData<fn() -> A>,
+}
+
+impl<T, A> Iterator for IntoIter<T, A>
+where
+    A: ArenaBehavior,
+{
+    type Item = (A::Id, T);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        add_id::<A, _>(self.iter.next(), self.arena_id)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+
+impl<T, A> DoubleEndedIterator for IntoIter<T, A>
+where
+    A: ArenaBehavior,
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        add_id::<A, _>(self.iter.next_back(), self.arena_id)
+    }
+}
+
+impl<T, A> ExactSizeIterator for IntoIter<T, A>
+where
+    A: ArenaBehavior,
+{
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+}
+
+impl<T, A> IntoIterator for Arena<T, A>
+where
+    A: ArenaBehavior,
+{
+    type Item = (A::Id, T);
+    type IntoIter = IntoIter<T, A>;
+
+    #[inline]
+    fn into_iter(self) -> IntoIter<T, A> {
+        IntoIter {
+            arena_id: self.arena_id,
+            iter: self.items.into_iter().enumerate(),
+            _phantom: PhantomData,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn ids_are_send_sync() {
+        fn assert_send_sync<T: Send + Sync>() {}
+        struct Foo;
+        assert_send_sync::<Id<Foo>>();
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/src/rayon.rs
@@ -0,0 +1,282 @@
+extern crate rayon;
+
+use self::rayon::iter::plumbing::{Consumer, UnindexedConsumer};
+use self::rayon::iter::plumbing::ProducerCallback;
+use self::rayon::prelude::*;
+use super::*;
+
+impl<T, A> Arena<T, A>
+where
+    A: ArenaBehavior,
+{
+    /// Returns an iterator of shared references which can be used to iterate
+    /// over this arena in parallel with the `rayon` crate.
+    ///
+    /// # Features
+    ///
+    /// This API requires the `rayon` feature of this crate to be enabled.
+    pub fn par_iter(&self) -> ParIter<T, A>
+    where
+        T: Sync,
+        A::Id: Send,
+    {
+        ParIter {
+            arena_id: self.arena_id,
+            iter: self.items.par_iter().enumerate(),
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Returns an iterator of mutable references which can be used to iterate
+    /// over this arena in parallel with the `rayon` crate.
+    ///
+    /// # Features
+    ///
+    /// This API requires the `rayon` feature of this crate to be enabled.
+    pub fn par_iter_mut(&mut self) -> ParIterMut<T, A>
+    where
+        T: Send + Sync,
+        A::Id: Send,
+    {
+        ParIterMut {
+            arena_id: self.arena_id,
+            iter: self.items.par_iter_mut().enumerate(),
+            _phantom: PhantomData,
+        }
+    }
+}
+
+/// A parallel iterator over shared references in an arena.
+///
+/// See `Arena::par_iter` for more information.
+#[derive(Debug)]
+pub struct ParIter<'a, T, A>
+where
+    T: Sync,
+{
+    arena_id: u32,
+    iter: rayon::iter::Enumerate<rayon::slice::Iter<'a, T>>,
+    _phantom: PhantomData<fn() -> A>,
+}
+
+impl<'a, T, A> ParallelIterator for ParIter<'a, T, A>
+where
+    T: Sync,
+    A: ArenaBehavior,
+    A::Id: Send,
+{
+    type Item = (A::Id, &'a T);
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+    where
+        C: UnindexedConsumer<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .drive_unindexed(consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        self.iter.opt_len()
+    }
+}
+
+impl<'a, T, A> IndexedParallelIterator for ParIter<'a, T, A>
+where
+    T: Sync,
+    A: ArenaBehavior,
+    A::Id: Send,
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+    where
+        C: Consumer<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .drive(consumer)
+    }
+
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+    where
+        CB: ProducerCallback<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .with_producer(callback)
+    }
+}
+
+impl<'data, T, A> IntoParallelIterator for &'data Arena<T, A>
+    where A: ArenaBehavior,
+          A::Id: Send,
+          T: Sync,
+{
+    type Item = (A::Id, &'data T);
+    type Iter = ParIter<'data, T, A>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        self.par_iter()
+    }
+}
+
+/// A parallel iterator over mutable references in an arena.
+///
+/// See `Arena::par_iter_mut` for more information.
+#[derive(Debug)]
+pub struct ParIterMut<'a, T, A>
+where
+    T: Send + Sync,
+{
+    arena_id: u32,
+    iter: rayon::iter::Enumerate<rayon::slice::IterMut<'a, T>>,
+    _phantom: PhantomData<fn() -> A>,
+}
+
+impl<'a, T, A> ParallelIterator for ParIterMut<'a, T, A>
+where
+    T: Send + Sync,
+    A: ArenaBehavior,
+    A::Id: Send,
+{
+    type Item = (A::Id, &'a mut T);
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+    where
+        C: UnindexedConsumer<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .drive_unindexed(consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        self.iter.opt_len()
+    }
+}
+
+impl<'a, T, A> IndexedParallelIterator for ParIterMut<'a, T, A>
+where
+    T: Send + Sync,
+    A: ArenaBehavior,
+    A::Id: Send,
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+    where
+        C: Consumer<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .drive(consumer)
+    }
+
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+    where
+        CB: ProducerCallback<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .with_producer(callback)
+    }
+}
+
+impl<'data, T, A> IntoParallelIterator for &'data mut Arena<T, A>
+    where A: ArenaBehavior,
+          A::Id: Send,
+          T: Send + Sync,
+{
+    type Item = (A::Id, &'data mut T);
+    type Iter = ParIterMut<'data, T, A>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        self.par_iter_mut()
+    }
+}
+
+/// A parallel iterator over items in an arena.
+///
+/// See `Arena::into_par_iter` for more information.
+#[derive(Debug)]
+pub struct IntoParIter<T, A>
+where
+    T: Send,
+{
+    arena_id: u32,
+    iter: rayon::iter::Enumerate<rayon::vec::IntoIter<T>>,
+    _phantom: PhantomData<fn() -> A>,
+}
+
+impl<T, A> ParallelIterator for IntoParIter<T, A>
+where
+    T: Send,
+    A: ArenaBehavior,
+    A::Id: Send,
+{
+    type Item = (A::Id, T);
+
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+    where
+        C: UnindexedConsumer<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .drive_unindexed(consumer)
+    }
+
+    fn opt_len(&self) -> Option<usize> {
+        self.iter.opt_len()
+    }
+}
+
+impl<T, A> IndexedParallelIterator for IntoParIter<T, A>
+where
+    T: Send,
+    A: ArenaBehavior,
+    A::Id: Send,
+{
+    fn drive<C>(self, consumer: C) -> C::Result
+    where
+        C: Consumer<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .drive(consumer)
+    }
+
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+
+    fn with_producer<CB>(self, callback: CB) -> CB::Output
+    where
+        CB: ProducerCallback<Self::Item>,
+    {
+        let arena_id = self.arena_id;
+        self.iter.map(|(i, item)| (A::new_id(arena_id, i), item))
+            .with_producer(callback)
+    }
+}
+
+impl<T, A> IntoParallelIterator for Arena<T, A>
+    where A: ArenaBehavior,
+          A::Id: Send,
+          T: Send,
+{
+    type Item = (A::Id, T);
+    type Iter = IntoParIter<T, A>;
+
+    fn into_par_iter(self) -> Self::Iter {
+        IntoParIter {
+            arena_id: self.arena_id,
+            iter: self.items.into_par_iter().enumerate(),
+            _phantom: PhantomData,
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/id-arena/tests/readme_up_to_date.rs
@@ -0,0 +1,22 @@
+use std::fs;
+use std::process::Command;
+
+#[test]
+fn cargo_readme_up_to_date() {
+    println!("Checking that `cargo readme > README.md` is up to date...");
+
+    let expected = Command::new("cargo")
+        .arg("readme")
+        .current_dir(env!("CARGO_MANIFEST_DIR"))
+        .output()
+        .expect("should run `cargo readme` OK")
+        .stdout;
+    let expected = String::from_utf8_lossy(&expected);
+
+    let actual = fs::read_to_string(concat!(env!("CARGO_MANIFEST_DIR"), "/README.md"))
+        .expect("should read README.md OK");
+
+    if actual != expected {
+        panic!("Run `cargo readme > README.md` to update README.md");
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{"CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.lock":"c95c530d76b891215cce4342a806bbc1747ab4d62f54330d932dafb542fa1a56","Cargo.toml":"00eb8afcb73a205013caf49fff1378a2304269f87f9d79beece7039f9bfb5ccf","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"e28eb7d26ddd6dd71e1757f4eab63044b5c430932ef3c3a24e9772ddc78ebf85","examples/README.md":"143767fc145bf167ce269a65138cb3f7086cb715b8bc4f73626da82966e646f4","examples/iterator.rs":"ddc3997e394a30ad82d78d2675a48c4617353f88b89bb9a3df5a3804d59b8ef9","examples/simple-store.rs":"cae63e39f2f98ee6ac2f387dcb02d6b929828a74f32f7d18d69c7fc9c3cce765","run-all-examples.sh":"7f9d11d01017f77e1c9d26e3e82dfca8c6930deaec85e864458e33a7fa267de0","src/bin/dump.rs":"da8543848e57893902751f4c4745e835b9c86263da2344af18d5717014f645f5","src/bin/rand.rs":"3da924fa0f1a118f606e2b94aee3a0553d9ebdbd17ee0152b85148adbf521bba","src/env.rs":"5deac6b35e49da1d47d7c852ed2e30ef96b6d15998fe7a79479cec64697626fc","src/error.rs":"f2cbab99691f36c98c24d297de3a303de258ddd3a06e2f54cb5efce20eb3740b","src/lib.rs":"4fe4e7d6a912a850b709ed23e372acd4f214890066322b4720376f7772bb776e","src/manager.rs":"ff2d76056e3a7200035b2e75c5bc2159f337e59c076dddd2476e3094b6ae3741","src/migrate.rs":"674cee0d027fc2eed3b09cebe686c837a97725099c967d8c2f49d19e793e6bfd","src/readwrite.rs":"fde695333e4845f4f53d63da6281f585919e2a3ac5cfe00d173cc139bc822763","src/store.rs":"409d13b1ea0d1254dae947ecbce50e741fb71c3ca118a78803b734336dce6a8f","src/store/integer.rs":"f386474c971f671c9b316a16ebff5b586be6837c886f443753ae13277a7e0070","src/store/integermulti.rs":"1a0912f97619297da31cc8c146e38941b88539d2857df81191a49c8dbd18625d","src/store/multi.rs":"2dec01c2202a2c9069cced4e1e42906b01d0b85df25d17e0ea810c05fa8395d0","src/store/single.rs":"c55c3600714f5ed9e820b16c2335ae00a0071174e0a32b9df89a34182a4b908c","src/value.rs":"7fae77a8291b951591e557ec694bfdadc9eb78557dad36a970cfcdcfb83fd238","tests/integer-store.rs":"f7e06c
71b0dead2323c7c61fc8bcbffbdd3a4796eebf6138db9cce3dbba716a3","tests/manager.rs":"97ec61145dc227f4f5fbcb6449c096bbe5b9a09db4e61ff4491c0443fe9adf26","tests/multi-integer-store.rs":"83295b0135c502321304aa06b05d5a9eeab41b1438ed7ddf2cb1a3613dfef4d9","tests/test_txn.rs":"f486d8bd485398e49ae64eac59ca3b44dfa7f8340aab17483cd3e9864fadd88b"},"package":"9aab7c645d32e977e186448b0a5c2c3139a91a7f630cfd8a8c314d1d145e78bf"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/CODE_OF_CONDUCT.md
@@ -0,0 +1,15 @@
+# Community Participation Guidelines
+
+This repository is governed by Mozilla's code of conduct and etiquette guidelines. 
+For more details, please read the
+[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). 
+
+## How to Report
+For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page.
+
+<!--
+## Project Specific Etiquette
+
+In some cases, there will be additional project etiquette i.e.: (https://bugzilla.mozilla.org/page.cgi?id=etiquette.html).
+Please update for your project.
+-->
rename from third_party/rust/rkv/Cargo.lock
rename to third_party/rust/rkv-0.10.2/Cargo.lock
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/Cargo.toml
@@ -0,0 +1,75 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "rkv"
+version = "0.10.2"
+authors = ["Richard Newman <rnewman@twinql.com>", "Nan Jiang <najiang@mozilla.com>", "Myk Melez <myk@mykzilla.org>", "Victor Porof <vporof@mozilla.com>"]
+exclude = ["/tests/envs/*"]
+description = "a simple, humane, typed Rust interface to LMDB"
+homepage = "https://github.com/mozilla/rkv"
+documentation = "https://docs.rs/rkv"
+readme = "README.md"
+keywords = ["lmdb", "database", "storage"]
+categories = ["database"]
+license = "Apache-2.0"
+repository = "https://github.com/mozilla/rkv"
+[dependencies.arrayref]
+version = "0.3"
+
+[dependencies.bincode]
+version = "1.0"
+
+[dependencies.bitflags]
+version = "1"
+
+[dependencies.byteorder]
+version = "1"
+
+[dependencies.failure]
+version = "0.1"
+features = ["derive"]
+default_features = false
+
+[dependencies.lazy_static]
+version = "1.0"
+
+[dependencies.lmdb-rkv]
+version = "0.12.3"
+
+[dependencies.ordered-float]
+version = "1.0"
+
+[dependencies.serde]
+version = "1.0"
+
+[dependencies.serde_derive]
+version = "1.0"
+
+[dependencies.url]
+version = "2.0"
+
+[dependencies.uuid]
+version = "0.7"
+[dev-dependencies.byteorder]
+version = "1"
+
+[dev-dependencies.tempfile]
+version = "3"
+
+[features]
+backtrace = ["failure/backtrace", "failure/std"]
+default = []
+with-asan = ["lmdb-rkv/with-asan"]
+with-fuzzer = ["lmdb-rkv/with-fuzzer"]
+with-fuzzer-no-link = ["lmdb-rkv/with-fuzzer-no-link"]
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/README.md
@@ -0,0 +1,55 @@
+[![Travis CI Build Status](https://travis-ci.org/mozilla/rkv.svg?branch=master)](https://travis-ci.org/mozilla/rkv)
+[![Appveyor Build Status](https://ci.appveyor.com/api/projects/status/lk936u5y5bi6qafb/branch/master?svg=true)](https://ci.appveyor.com/project/mykmelez/rkv/branch/master)
+[![Documentation](https://docs.rs/rkv/badge.svg)](https://docs.rs/rkv/)
+[![Crate](https://img.shields.io/crates/v/rkv.svg)](https://crates.io/crates/rkv)
+
+# rkv
+
+The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed Rust interface to [LMDB](http://www.lmdb.tech/doc/).
+
+## Use
+
+Comprehensive information about using rkv is available in its [online documentation](https://docs.rs/rkv/), which you can also generate for local consumption:
+
+```sh
+cargo doc --open
+```
+
+## Build
+
+Build this project as you would build other Rust crates:
+
+```sh
+cargo build
+```
+
+If you specify the `backtrace` feature, backtraces will be enabled in `failure`
+errors. This feature is disabled by default.
+
+## Test
+
+Test this project as you would test other Rust crates:
+
+```sh
+cargo test
+```
+
+The project includes unit and doc tests embedded in the `src/` files, integration tests in the `tests/` subdirectory, and usage examples in the `examples/` subdirectory. To ensure your changes don't break examples, also run them via the run-all-examples.sh shell script:
+
+```sh
+./run-all-examples.sh
+```
+
+Note: the test fixtures in the `tests/envs/` subdirectory aren't included in the package published to crates.io, so you must clone this repository in order to run the tests that depend on those fixtures or use the `rand` and `dump` executables to recreate them.
+
+## Contribute
+
+Of the various open source archetypes described in [A Framework for Purposeful Open Source](https://medium.com/mozilla-open-innovation/whats-your-open-source-strategy-here-are-10-answers-383221b3f9d3), the rkv project most closely resembles the Specialty Library, and we welcome contributions. Please report problems or ask questions using this repo's GitHub [issue tracker](https://github.com/mozilla/rkv/issues) and submit [pull requests](https://github.com/mozilla/rkv/pulls) for code and documentation changes.
+
+rkv relies on the latest [rustfmt](https://github.com/rust-lang-nursery/rustfmt) for code formatting, so please make sure your pull request passes the rustfmt before submitting it for review. See rustfmt's [quick start](https://github.com/rust-lang-nursery/rustfmt#quick-start) for installation details.
+
+We follow Mozilla's [Community Participation Guidelines](https://www.mozilla.org/en-US/about/governance/policies/participation/) while contributing to this project.
+
+## License
+
+The rkv source code is licensed under the Apache License, Version 2.0, as described in the [LICENSE](https://github.com/mozilla/rkv/blob/master/LICENSE) file.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/examples/README.md
@@ -0,0 +1,11 @@
+## Examples of how to use rkv
+
+All examples can be executed with:
+
+```
+cargo run --example $name
+```
+
+* [`simple-store`](simple-store.rs) - a simple key/value store that showcases the basic usage of rkv.
+
+* [`iterator`](iterator.rs) - a demo that showcases the basic usage of iterators in rkv.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/examples/iterator.rs
@@ -0,0 +1,75 @@
+// Any copyright is dedicated to the Public Domain.
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+//! A demo that showcases the basic usage of iterators in rkv.
+//!
+//! You can test this out by running:
+//!
+//!     cargo run --example iterator
+
+use rkv::{
+    Manager,
+    Rkv,
+    SingleStore,
+    StoreError,
+    StoreOptions,
+    Value,
+};
+use tempfile::Builder;
+
+use std::fs;
+use std::str;
+
+fn main() {
+    let root = Builder::new().prefix("iterator").tempdir().unwrap();
+    fs::create_dir_all(root.path()).unwrap();
+    let p = root.path();
+
+    let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new).unwrap();
+    let k = created_arc.read().unwrap();
+    let store = k.open_single("store", StoreOptions::create()).unwrap();
+
+    populate_store(&k, store).unwrap();
+
+    let reader = k.read().unwrap();
+
+    println!("Iterating from the beginning...");
+    // Reader::iter_start() iterates from the first item in the store, and
+    // returns the (key, value) tuples in order.
+    let mut iter = store.iter_start(&reader).unwrap();
+    while let Some(Ok((country, city))) = iter.next() {
+        println!("{}, {:?}", str::from_utf8(country).unwrap(), city);
+    }
+
+    println!();
+    println!("Iterating from the given key...");
+    // Reader::iter_from() iterates from the first key equal to or greater
+    // than the given key.
+    let mut iter = store.iter_from(&reader, "Japan").unwrap();
+    while let Some(Ok((country, city))) = iter.next() {
+        println!("{}, {:?}", str::from_utf8(country).unwrap(), city);
+    }
+
+    println!();
+    println!("Iterating from the given prefix...");
+    let mut iter = store.iter_from(&reader, "Un").unwrap();
+    while let Some(Ok((country, city))) = iter.next() {
+        println!("{}, {:?}", str::from_utf8(country).unwrap(), city);
+    }
+}
+
+fn populate_store(k: &Rkv, store: SingleStore) -> Result<(), StoreError> {
+    let mut writer = k.write()?;
+    for (country, city) in vec![
+        ("Canada", Value::Str("Ottawa")),
+        ("United States of America", Value::Str("Washington")),
+        ("Germany", Value::Str("Berlin")),
+        ("France", Value::Str("Paris")),
+        ("Italy", Value::Str("Rome")),
+        ("United Kingdom", Value::Str("London")),
+        ("Japan", Value::Str("Tokyo")),
+    ] {
+        store.put(&mut writer, country, &city)?;
+    }
+    writer.commit()
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/examples/simple-store.rs
@@ -0,0 +1,183 @@
+// Any copyright is dedicated to the Public Domain.
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+//! A simple rkv demo that showcases the basic usage (put/get/delete) of rkv.
+//!
+//! You can test this out by running:
+//!
+//!     cargo run --example simple-store
+
+use rkv::{
+    Manager,
+    MultiStore,
+    Rkv,
+    StoreOptions,
+    Value,
+    Writer,
+};
+use tempfile::Builder;
+
+use std::fs;
+
+fn getput<'env, 's>(store: MultiStore, writer: &'env mut Writer, ids: &'s mut Vec<String>) {
+    let keys = vec!["str1", "str2", "str3"];
+    // we convert the writer into a cursor so that we can safely read
+    for k in keys.iter() {
+        // this is a multi-valued database, so get returns an iterator
+        let mut iter = store.get(writer, k).unwrap();
+        while let Some(Ok((_key, val))) = iter.next() {
+            if let Value::Str(s) = val.unwrap() {
+                ids.push(s.to_owned());
+            } else {
+                panic!("didn't get a string back!");
+            }
+        }
+    }
+    for id in ids {
+        store.put(writer, &id, &Value::Blob(b"weeeeeee")).unwrap();
+    }
+}
+
+fn delete(store: MultiStore, writer: &mut Writer) {
+    let keys = vec!["str1", "str2", "str3"];
+    let vals = vec!["string uno", "string quatro", "string siete"];
+    // we convert the writer into a cursor so that we can safely read
+    for i in 0..keys.len() {
+        store.delete(writer, &keys[i], &Value::Str(vals[i])).unwrap();
+    }
+}
+
+fn main() {
+    let root = Builder::new().prefix("simple-db").tempdir().unwrap();
+    fs::create_dir_all(root.path()).unwrap();
+    let p = root.path();
+
+    // The manager enforces that each process opens the same lmdb environment at most once
+    let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new).unwrap();
+    let k = created_arc.read().unwrap();
+
+    // Creates a store called "store"
+    let store = k.open_single("store", StoreOptions::create()).unwrap();
+
+    let multistore = k.open_multi("multistore", StoreOptions::create()).unwrap();
+
+    println!("Inserting data...");
+    {
+        // Use a writer to mutate the store
+        let mut writer = k.write().unwrap();
+        store.put(&mut writer, "int", &Value::I64(1234)).unwrap();
+        store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap();
+        store.put(&mut writer, "float", &Value::F64(1234.0.into())).unwrap();
+        store.put(&mut writer, "instant", &Value::Instant(1_528_318_073_700)).unwrap();
+        store.put(&mut writer, "boolean", &Value::Bool(true)).unwrap();
+        store.put(&mut writer, "string", &Value::Str("héllo, yöu")).unwrap();
+        store.put(&mut writer, "json", &Value::Json(r#"{"foo":"bar", "number": 1}"#)).unwrap();
+        store.put(&mut writer, "blob", &Value::Blob(b"blob")).unwrap();
+        writer.commit().unwrap();
+    }
+
+    println!("Testing getput");
+    {
+        let mut ids = Vec::new();
+        let mut writer = k.write().unwrap();
+        multistore.put(&mut writer, "str1", &Value::Str("string uno")).unwrap();
+        multistore.put(&mut writer, "str1", &Value::Str("string dos")).unwrap();
+        multistore.put(&mut writer, "str1", &Value::Str("string tres")).unwrap();
+        multistore.put(&mut writer, "str2", &Value::Str("string quatro")).unwrap();
+        multistore.put(&mut writer, "str2", &Value::Str("string cinco")).unwrap();
+        multistore.put(&mut writer, "str2", &Value::Str("string seis")).unwrap();
+        multistore.put(&mut writer, "str3", &Value::Str("string siete")).unwrap();
+        multistore.put(&mut writer, "str3", &Value::Str("string ocho")).unwrap();
+        multistore.put(&mut writer, "str3", &Value::Str("string nueve")).unwrap();
+        getput(multistore, &mut writer, &mut ids);
+        writer.commit().unwrap();
+        let mut writer = k.write().unwrap();
+        delete(multistore, &mut writer);
+        writer.commit().unwrap();
+    }
+    println!("Looking up keys...");
+    {
+        // Use a reader to query the store
+        let reader = k.read().unwrap();
+        println!("Get int {:?}", store.get(&reader, "int").unwrap());
+        println!("Get uint {:?}", store.get(&reader, "uint").unwrap());
+        println!("Get float {:?}", store.get(&reader, "float").unwrap());
+        println!("Get instant {:?}", store.get(&reader, "instant").unwrap());
+        println!("Get boolean {:?}", store.get(&reader, "boolean").unwrap());
+        println!("Get string {:?}", store.get(&reader, "string").unwrap());
+        println!("Get json {:?}", store.get(&reader, "json").unwrap());
+        println!("Get blob {:?}", store.get(&reader, "blob").unwrap());
+        println!("Get non-existent {:?}", store.get(&reader, "non-existent").unwrap());
+    }
+
+    println!("Looking up keys via Writer.get()...");
+    {
+        let mut writer = k.write().unwrap();
+        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+        store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
+        store.delete(&mut writer, "foo").unwrap();
+        println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap());
+        println!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
+        writer.commit().unwrap();
+        let reader = k.read().expect("reader");
+        println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
+        println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
+    }
+
+    println!("Aborting transaction...");
+    {
+        // Aborting a write transaction rollbacks the change(s)
+        let mut writer = k.write().unwrap();
+        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+        writer.abort();
+
+        let reader = k.read().expect("reader");
+        println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
+        // Explicitly aborting a transaction is not required unless an early
+        // abort is desired, since both read and write transactions will
+        // implicitly be aborted once they go out of scope.
+    }
+
+    println!("Deleting keys...");
+    {
+        // Deleting a key/value also requires a write transaction
+        let mut writer = k.write().unwrap();
+        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+        store.delete(&mut writer, "foo").unwrap();
+        println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap());
+        writer.commit().unwrap();
+
+        // Committing a transaction consumes the writer, preventing you
+        // from reusing it by failing and reporting a compile-time error.
+        // This line would report error[E0382]: use of moved value: `writer`.
+        // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap();
+    }
+
+    println!("Clearing store...");
+    {
+        // Clearing a store deletes all the entries in that store
+        let mut writer = k.write().unwrap();
+        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+        store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
+        store.clear(&mut writer).unwrap();
+        writer.commit().unwrap();
+
+        let reader = k.read().expect("reader");
+        println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
+        println!("It should be None! ({:?})", store.get(&reader, "bar").unwrap());
+    }
+
+    println!("Write and read on multiple stores...");
+    {
+        let another_store = k.open_single("another_store", StoreOptions::create()).unwrap();
+        let mut writer = k.write().unwrap();
+        store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+        another_store.put(&mut writer, "foo", &Value::Str("baz")).unwrap();
+        writer.commit().unwrap();
+
+        let reader = k.read().unwrap();
+        println!("Get from store value: {:?}", store.get(&reader, "foo").unwrap());
+        println!("Get from another store value: {:?}", another_store.get(&reader, "foo").unwrap());
+    }
+    println!("Environment statistics: btree depth = {}", k.stat().unwrap().depth());
+}
new file mode 100755
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/run-all-examples.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+cargo build --examples
+
+for file in examples/*; do
+    filename=$(basename ${file})
+    extension=${filename##*.}
+    example_name=${filename%.*}
+    if [[ "${extension}" = "rs" ]]; then
+        cargo run --example ${example_name}
+    fi
+done
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/bin/dump.rs
@@ -0,0 +1,55 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+extern crate rkv;
+
+use rkv::{
+    error::MigrateError,
+    migrate::Migrator,
+};
+use std::{
+    env::args,
+    io,
+    path::Path,
+};
+
+fn main() -> Result<(), MigrateError> {
+    let mut cli_args = args();
+    let mut db_name = None;
+    let mut env_path = None;
+
+    // The first arg is the name of the program, which we can ignore.
+    cli_args.next();
+
+    while let Some(arg) = cli_args.next() {
+        if &arg[0..1] == "-" {
+            match &arg[1..] {
+                "s" => {
+                    db_name = match cli_args.next() {
+                        None => return Err("-s must be followed by database name".into()),
+                        Some(str) => Some(str),
+                    };
+                },
+                str => return Err(format!("arg -{} not recognized", str).into()),
+            }
+        } else {
+            if env_path.is_some() {
+                return Err("must provide only one path to the LMDB environment".into());
+            }
+            env_path = Some(arg);
+        }
+    }
+
+    let env_path = env_path.ok_or("must provide a path to the LMDB environment")?;
+    let mut migrator: Migrator = Migrator::new(Path::new(&env_path))?;
+    migrator.dump(db_name.as_ref().map(String::as_str), io::stdout()).unwrap();
+
+    Ok(())
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/bin/rand.rs
@@ -0,0 +1,114 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//! A command-line utility to create an LMDB environment containing random data.
+//! It requires one flag, `-s path/to/environment`, which specifies the location
+//! where the tool should create the environment.  Optionally, you may specify
+//! the number of key/value pairs to create via the `-n <number>` flag
+//! (for which the default value is 50).
+
+extern crate rkv;
+
+use rkv::{
+    Rkv,
+    SingleStore,
+    StoreOptions,
+    Value,
+};
+use std::{
+    env::args,
+    fs::{
+        create_dir_all,
+        File,
+    },
+    io::Read,
+    path::Path,
+};
+
+fn main() {
+    let mut args = args();
+    let mut database = None;
+    let mut path = None;
+    let mut num_pairs = 50;
+
+    // The first arg is the name of the program, which we can ignore.
+    args.next();
+
+    while let Some(arg) = args.next() {
+        if &arg[0..1] == "-" {
+            match &arg[1..] {
+                "s" => {
+                    database = match args.next() {
+                        None => panic!("-s must be followed by database arg"),
+                        Some(str) => Some(str),
+                    };
+                },
+                "n" => {
+                    num_pairs = match args.next() {
+                        None => panic!("-s must be followed by number of pairs"),
+                        Some(str) => str.parse().expect("number"),
+                    };
+                },
+                str => panic!("arg -{} not recognized", str),
+            }
+        } else {
+            if path.is_some() {
+                panic!("must provide only one path to the LMDB environment");
+            }
+            path = Some(arg);
+        }
+    }
+
+    if path.is_none() {
+        panic!("must provide a path to the LMDB environment");
+    }
+    let path = path.unwrap();
+
+    create_dir_all(&path).expect("dir created");
+
+    let mut builder = Rkv::environment_builder();
+    builder.set_max_dbs(2);
+    // Allocate enough map to accommodate the largest random collection.
+    // We currently do this by allocating twice the maximum possible size
+    // of the pairs (assuming maximum key and value sizes).
+    builder.set_map_size((511 + 65535) * num_pairs * 2);
+    let rkv = Rkv::from_env(Path::new(&path), builder).expect("Rkv");
+    let store: SingleStore =
+        rkv.open_single(database.as_ref().map(|x| x.as_str()), StoreOptions::create()).expect("opened");
+    let mut writer = rkv.write().expect("writer");
+
+    // Generate random values for the number of keys and key/value lengths.
+    // On Linux, "Just use /dev/urandom!" <https://www.2uo.de/myths-about-urandom/>.
+    // On macOS it doesn't matter (/dev/random and /dev/urandom are identical).
+    let mut random = File::open("/dev/urandom").unwrap();
+    let mut nums = [0u8; 4];
+    random.read_exact(&mut nums).unwrap();
+
+    // Generate `num_pairs` key/value pairs (default 50, configurable via -n).
+    for _ in 0..num_pairs {
+        // Generate key and value lengths.  The key must be 1–511 bytes long.
+        // The value length can be 0 and is essentially unbounded; we generate
+        // value lengths of 0–0xffff (65535).
+        // NB: the modulus method for generating a random number within a range
+        // introduces distribution skew, but we don't need it to be perfect.
+        let key_len = ((u16::from(nums[0]) + (u16::from(nums[1]) << 8)) % 511 + 1) as usize;
+        let value_len = (u16::from(nums[2]) + (u16::from(nums[3]) << 8)) as usize;
+
+        let mut key: Vec<u8> = vec![0; key_len];
+        random.read_exact(&mut key[0..key_len]).unwrap();
+
+        let mut value: Vec<u8> = vec![0; value_len];
+        random.read_exact(&mut value[0..value_len]).unwrap();
+
+        store.put(&mut writer, key, &Value::Blob(&value)).expect("wrote");
+    }
+
+    writer.commit().expect("committed");
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/env.rs
@@ -0,0 +1,1336 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use std::os::raw::c_uint;
+
+use std::path::{
+    Path,
+    PathBuf,
+};
+
+use lmdb;
+
+use lmdb::{
+    Database,
+    DatabaseFlags,
+    Environment,
+    EnvironmentBuilder,
+    Error,
+    Info,
+    Stat,
+};
+
+use crate::error::StoreError;
+use crate::readwrite::{
+    Reader,
+    Writer,
+};
+use crate::store::integer::{
+    IntegerStore,
+    PrimitiveInt,
+};
+
+use crate::store::integermulti::MultiIntegerStore;
+use crate::store::multi::MultiStore;
+use crate::store::single::SingleStore;
+use crate::store::Options as StoreOptions;
+
+pub static DEFAULT_MAX_DBS: c_uint = 5;
+
+/// Wrapper around an `lmdb::Environment`.
+#[derive(Debug)]
+pub struct Rkv {
+    path: PathBuf,
+    env: Environment,
+}
+
+/// Static methods.
+impl Rkv {
+    pub fn environment_builder() -> EnvironmentBuilder {
+        Environment::new()
+    }
+
+    /// Return a new Rkv environment that supports up to `DEFAULT_MAX_DBS` open databases.
+    #[allow(clippy::new_ret_no_self)]
+    pub fn new(path: &Path) -> Result<Rkv, StoreError> {
+        Rkv::with_capacity(path, DEFAULT_MAX_DBS)
+    }
+
+    /// Return a new Rkv environment from the provided builder.
+    pub fn from_env(path: &Path, env: EnvironmentBuilder) -> Result<Rkv, StoreError> {
+        if !path.is_dir() {
+            return Err(StoreError::DirectoryDoesNotExistError(path.into()));
+        }
+
+        Ok(Rkv {
+            path: path.into(),
+            env: env.open(path).map_err(|e| match e {
+                lmdb::Error::Other(2) => StoreError::DirectoryDoesNotExistError(path.into()),
+                e => StoreError::LmdbError(e),
+            })?,
+        })
+    }
+
+    /// Return a new Rkv environment that supports the specified number of open databases.
+    pub fn with_capacity(path: &Path, max_dbs: c_uint) -> Result<Rkv, StoreError> {
+        if !path.is_dir() {
+            return Err(StoreError::DirectoryDoesNotExistError(path.into()));
+        }
+
+        let mut builder = Rkv::environment_builder();
+        builder.set_max_dbs(max_dbs);
+
+        // Future: set flags, maximum size, etc. here if necessary.
+        Rkv::from_env(path, builder)
+    }
+}
+
+/// Store creation methods.
+impl Rkv {
+    /// Create or open an existing database in (&[u8] -> Single Value) mode.
+    /// Note that create=true cannot be called concurrently with other operations,
+    /// so if you are sure that the database exists, call this with create=false.
+    pub fn open_single<'s, T>(&self, name: T, opts: StoreOptions) -> Result<SingleStore, StoreError>
+    where
+        T: Into<Option<&'s str>>,
+    {
+        self.open(name, opts).map(SingleStore::new)
+    }
+
+    /// Create or open an existing database in (Integer -> Single Value) mode.
+    /// Note that create=true cannot be called concurrently with other operations,
+    /// so if you are sure that the database exists, call this with create=false.
+    pub fn open_integer<'s, T, K: PrimitiveInt>(
+        &self,
+        name: T,
+        mut opts: StoreOptions,
+    ) -> Result<IntegerStore<K>, StoreError>
+    where
+        T: Into<Option<&'s str>>,
+    {
+        opts.flags.set(DatabaseFlags::INTEGER_KEY, true);
+        self.open(name, opts).map(IntegerStore::new)
+    }
+
+    /// Create or open an existing database in (&[u8] -> Multiple Values) mode.
+    /// Note that create=true cannot be called concurrently with other operations,
+    /// so if you are sure that the database exists, call this with create=false.
+    pub fn open_multi<'s, T>(&self, name: T, mut opts: StoreOptions) -> Result<MultiStore, StoreError>
+    where
+        T: Into<Option<&'s str>>,
+    {
+        opts.flags.set(DatabaseFlags::DUP_SORT, true);
+        self.open(name, opts).map(MultiStore::new)
+    }
+
+    /// Create or open an existing database in (Integer -> Multiple Values) mode.
+    /// Note that create=true cannot be called concurrently with other operations,
+    /// so if you are sure that the database exists, call this with create=false.
+    pub fn open_multi_integer<'s, T, K: PrimitiveInt>(
+        &self,
+        name: T,
+        mut opts: StoreOptions,
+    ) -> Result<MultiIntegerStore<K>, StoreError>
+    where
+        T: Into<Option<&'s str>>,
+    {
+        opts.flags.set(DatabaseFlags::INTEGER_KEY, true);
+        opts.flags.set(DatabaseFlags::DUP_SORT, true);
+        self.open(name, opts).map(MultiIntegerStore::new)
+    }
+
+    fn open<'s, T>(&self, name: T, opts: StoreOptions) -> Result<Database, StoreError>
+    where
+        T: Into<Option<&'s str>>,
+    {
+        if opts.create {
+            self.env.create_db(name.into(), opts.flags).map_err(|e| match e {
+                lmdb::Error::BadRslot => StoreError::open_during_transaction(),
+                _ => e.into(),
+            })
+        } else {
+            self.env.open_db(name.into()).map_err(|e| match e {
+                lmdb::Error::BadRslot => StoreError::open_during_transaction(),
+                _ => e.into(),
+            })
+        }
+    }
+}
+
+/// Read and write accessors.
+impl Rkv {
+    /// Create a read transaction.  There can be multiple concurrent readers
+    /// for an environment, up to the maximum specified by LMDB (default 126),
+    /// and you can open readers while a write transaction is active.
+    pub fn read(&self) -> Result<Reader, StoreError> {
+        Ok(Reader::new(self.env.begin_ro_txn().map_err(StoreError::from)?))
+    }
+
+    /// Create a write transaction.  There can be only one write transaction
+    /// active at any given time, so trying to create a second one will block
+    /// until the first is committed or aborted.
+    pub fn write(&self) -> Result<Writer, StoreError> {
+        Ok(Writer::new(self.env.begin_rw_txn().map_err(StoreError::from)?))
+    }
+}
+
+/// Other environment methods.
+impl Rkv {
+    /// Flush the data buffers to disk. This call is only useful when the environment
+    /// was opened with either `NO_SYNC`, `NO_META_SYNC` or `MAP_ASYNC` (see below).
+    /// The call is not valid if the environment was opened with `READ_ONLY`.
+    ///
+    /// Data is always written to disk when `transaction.commit()` is called,
+    /// but the operating system may keep it buffered.
+    /// LMDB always flushes the OS buffers upon commit as well,
+    /// unless the environment was opened with `NO_SYNC` or in part `NO_META_SYNC`.
+    ///
+    /// `force`: if true, force a synchronous flush.
+    /// Otherwise if the environment has the `NO_SYNC` flag set the flushes will be omitted,
+    /// and with `MAP_ASYNC` they will be asynchronous.
+    pub fn sync(&self, force: bool) -> Result<(), StoreError> {
+        self.env.sync(force).map_err(Into::into)
+    }
+
+    /// Retrieve statistics about this environment.
+    ///
+    /// It includes:
+    ///   * Page size in bytes
+    ///   * B-tree depth
+    ///   * Number of internal (non-leaf) pages
+    ///   * Number of leaf pages
+    ///   * Number of overflow pages
+    ///   * Number of data entries
+    pub fn stat(&self) -> Result<Stat, StoreError> {
+        self.env.stat().map_err(Into::into)
+    }
+
+    /// Retrieve information about this environment.
+    ///
+    /// It includes:
+    ///   * Map size in bytes
+    ///   * The last used page number
+    ///   * The last transaction ID
+    ///   * Max number of readers allowed
+    ///   * Number of readers in use
+    pub fn info(&self) -> Result<Info, StoreError> {
+        self.env.info().map_err(Into::into)
+    }
+
+    /// Retrieve the load ratio (# of used pages / total pages) of this environment.
+    ///
+    /// With the formula: (last_page_no - freelist_pages) / total_pages
+    pub fn load_ratio(&self) -> Result<f32, StoreError> {
+        let stat = self.stat()?;
+        let info = self.info()?;
+        let freelist = self.env.freelist()?;
+
+        let last_pgno = info.last_pgno() + 1; // pgno is 0 based.
+        let total_pgs = info.map_size() / stat.page_size() as usize;
+        if freelist > last_pgno {
+            return Err(StoreError::LmdbError(Error::Corrupted));
+        }
+        let used_pgs = last_pgno - freelist;
+        Ok(used_pgs as f32 / total_pgs as f32)
+    }
+
+    /// Sets the size of the memory map to use for the environment.
+    ///
+    /// This can be used to resize the map when the environment is already open.
+    /// You can also use `Rkv::environment_builder()` to set the map size during
+    /// the `Rkv` initialization.
+    ///
+    /// Note:
+    ///
+    /// * No active transactions allowed when performing resizing in this process.
+    ///   It's up to the consumer to enforce that.
+    ///
+    /// * The size should be a multiple of the OS page size. Any attempt to set
+    ///   a size smaller than the space already consumed by the environment will
+    ///   be silently changed to the current size of the used space.
+    ///
+    /// * In the multi-process case, once a process resizes the map, other
+    ///   processes need to either re-open the environment, or call set_map_size
+    ///   with size 0 to update the environment. Otherwise, new transaction creation
+    ///   will fail with `LmdbError::MapResized`.
+    pub fn set_map_size(&self, size: usize) -> Result<(), StoreError> {
+        self.env.set_map_size(size).map_err(Into::into)
+    }
+}
+
+// TODO: change this back to `clippy::cognitive_complexity` when Clippy stable
+// deprecates `clippy::cyclomatic_complexity`.
+#[allow(clippy::complexity)]
+#[cfg(test)]
+mod tests {
+    use byteorder::{
+        ByteOrder,
+        LittleEndian,
+    };
+    use std::{
+        fs,
+        str,
+        sync::{
+            Arc,
+            RwLock,
+        },
+        thread,
+    };
+    use tempfile::Builder;
+
+    use super::*;
+    use crate::*;
+
+    // The default size is 1MB.
+    const DEFAULT_SIZE: usize = 1024 * 1024;
+
+    /// We can't open a directory that doesn't exist.
+    #[test]
+    fn test_open_fails() {
+        let root = Builder::new().prefix("test_open_fails").tempdir().expect("tempdir");
+        assert!(root.path().exists());
+
+        let nope = root.path().join("nope/");
+        assert!(!nope.exists());
+
+        let pb = nope.to_path_buf();
+        match Rkv::new(nope.as_path()).err() {
+            Some(StoreError::DirectoryDoesNotExistError(p)) => {
+                assert_eq!(pb, p);
+            },
+            _ => panic!("expected error"),
+        };
+    }
+
+    fn check_rkv(k: &Rkv) {
+        let _ = k.open_single("default", StoreOptions::create()).expect("created default");
+
+        let yyy = k.open_single("yyy", StoreOptions::create()).expect("opened");
+        let reader = k.read().expect("reader");
+
+        let result = yyy.get(&reader, "foo");
+        assert_eq!(None, result.expect("success but no value"));
+    }
+
+    #[test]
+    fn test_open() {
+        let root = Builder::new().prefix("test_open").tempdir().expect("tempdir");
+        println!("Root path: {:?}", root.path());
+        fs::create_dir_all(root.path()).expect("dir created");
+        assert!(root.path().is_dir());
+
+        let k = Rkv::new(root.path()).expect("new succeeded");
+
+        check_rkv(&k);
+    }
+
+    #[test]
+    fn test_open_from_env() {
+        let root = Builder::new().prefix("test_open_from_env").tempdir().expect("tempdir");
+        println!("Root path: {:?}", root.path());
+        fs::create_dir_all(root.path()).expect("dir created");
+        assert!(root.path().is_dir());
+
+        let mut builder = Rkv::environment_builder();
+        builder.set_max_dbs(2);
+        let k = Rkv::from_env(root.path(), builder).expect("rkv");
+
+        check_rkv(&k);
+    }
+
+    #[test]
+    #[should_panic(expected = "opened: LmdbError(DbsFull)")]
+    fn test_open_with_capacity() {
+        let root = Builder::new().prefix("test_open_with_capacity").tempdir().expect("tempdir");
+        println!("Root path: {:?}", root.path());
+        fs::create_dir_all(root.path()).expect("dir created");
+        assert!(root.path().is_dir());
+
+        let k = Rkv::with_capacity(root.path(), 1).expect("rkv");
+
+        check_rkv(&k);
+
+        // This panics with "opened: LmdbError(DbsFull)" because we specified
+        // a capacity of one (database), and check_rkv already opened one
+        // (plus the default database, which doesn't count against the limit).
+        // This should really return an error rather than panicking, per
+        // <https://github.com/mozilla/lmdb-rs/issues/6>.
+        let _zzz = k.open_single("zzz", StoreOptions::create()).expect("opened");
+    }
+
+    fn get_larger_than_default_map_size_value() -> usize {
+        // The LMDB C library and lmdb Rust crate docs for setting the map size
+        // <http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>
+        // <https://docs.rs/lmdb/0.8.0/lmdb/struct.EnvironmentBuilder.html#method.set_map_size>
+        // both say that the default map size is 10,485,760 bytes, i.e. 10MiB.
+        //
+        // But the DEFAULT_MAPSIZE define in the LMDB code
+        // https://github.com/LMDB/lmdb/blob/26c7df88e44e31623d0802a564f24781acdefde3/libraries/liblmdb/mdb.c#L729
+        // sets the default map size to 1,048,576 bytes, i.e. 1MiB.
+        //
+        DEFAULT_SIZE + 1 /* 1,048,576 + 1 bytes, i.e. 1MiB + 1 byte */
+    }
+
+    #[test]
+    #[should_panic(expected = "wrote: LmdbError(MapFull)")]
+    fn test_exceed_map_size() {
+        let root = Builder::new().prefix("test_exceed_map_size").tempdir().expect("tempdir");
+        println!("Root path: {:?}", root.path());
+        fs::create_dir_all(root.path()).expect("dir created");
+        assert!(root.path().is_dir());
+
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("test", StoreOptions::create()).expect("opened");
+
+        // Writing a large enough value should cause LMDB to fail on MapFull.
+        // We write a string that is larger than the default map size.
+        let val = "x".repeat(get_larger_than_default_map_size_value());
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, "foo", &Value::Str(&val)).expect("wrote");
+    }
+
+    #[test]
+    #[should_panic(expected = "wrote: LmdbError(BadValSize)")]
+    fn test_exceed_key_size_limit() {
+        let root = Builder::new().prefix("test_exceed_key_size_limit").tempdir().expect("tempdir");
+        println!("Root path: {:?}", root.path());
+        fs::create_dir_all(root.path()).expect("dir created");
+        assert!(root.path().is_dir());
+
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("test", StoreOptions::create()).expect("opened");
+
+        let key = "k".repeat(512);
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, key, &Value::Str("val")).expect("wrote");
+    }
+
+    #[test]
+    fn test_increase_map_size() {
+        let root = Builder::new().prefix("test_open_with_map_size").tempdir().expect("tempdir");
+        println!("Root path: {:?}", root.path());
+        fs::create_dir_all(root.path()).expect("dir created");
+        assert!(root.path().is_dir());
+
+        let mut builder = Rkv::environment_builder();
+        // Set the map size to the size of the value we'll store in it + 100KiB,
+        // which ensures that there's enough space for the value and metadata.
+        builder.set_map_size(get_larger_than_default_map_size_value() + 100 * 1024 /* 100KiB */);
+        builder.set_max_dbs(2);
+        let k = Rkv::from_env(root.path(), builder).unwrap();
+        let sk: SingleStore = k.open_single("test", StoreOptions::create()).expect("opened");
+        let val = "x".repeat(get_larger_than_default_map_size_value());
+
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, "foo", &Value::Str(&val)).expect("wrote");
+        writer.commit().expect("committed");
+
+        let reader = k.read().unwrap();
+        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::Str(&val)));
+    }
+
+    #[test]
+    fn test_round_trip_and_transactions() {
+        let root = Builder::new().prefix("test_round_trip_and_transactions").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+
+        {
+            let mut writer = k.write().expect("writer");
+            sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+            sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote");
+            sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
+            sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
+            assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234)));
+            assert_eq!(sk.get(&writer, "noo").expect("read"), Some(Value::F64(1234.0.into())));
+            assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true)));
+            assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
+
+            // Isolation. Reads won't return values.
+            let r = &k.read().unwrap();
+            assert_eq!(sk.get(r, "foo").expect("read"), None);
+            assert_eq!(sk.get(r, "bar").expect("read"), None);
+            assert_eq!(sk.get(r, "baz").expect("read"), None);
+        }
+
+        // Dropped: tx rollback. Reads will still return nothing.
+
+        {
+            let r = &k.read().unwrap();
+            assert_eq!(sk.get(r, "foo").expect("read"), None);
+            assert_eq!(sk.get(r, "bar").expect("read"), None);
+            assert_eq!(sk.get(r, "baz").expect("read"), None);
+        }
+
+        {
+            let mut writer = k.write().expect("writer");
+            sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+            sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
+            sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
+            assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::I64(1234)));
+            assert_eq!(sk.get(&writer, "bar").expect("read"), Some(Value::Bool(true)));
+            assert_eq!(sk.get(&writer, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
+
+            writer.commit().expect("committed");
+        }
+
+        // Committed. Reads will succeed.
+        {
+            let r = k.read().unwrap();
+            assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234)));
+            assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true)));
+            assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
+        }
+
+        {
+            let mut writer = k.write().expect("writer");
+            sk.delete(&mut writer, "foo").expect("deleted");
+            sk.delete(&mut writer, "bar").expect("deleted");
+            sk.delete(&mut writer, "baz").expect("deleted");
+            assert_eq!(sk.get(&writer, "foo").expect("read"), None);
+            assert_eq!(sk.get(&writer, "bar").expect("read"), None);
+            assert_eq!(sk.get(&writer, "baz").expect("read"), None);
+
+            // Isolation. Reads still return values.
+            let r = k.read().unwrap();
+            assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234)));
+            assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true)));
+            assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
+        }
+
+        // Dropped: tx rollback. Reads will still return values.
+
+        {
+            let r = k.read().unwrap();
+            assert_eq!(sk.get(&r, "foo").expect("read"), Some(Value::I64(1234)));
+            assert_eq!(sk.get(&r, "bar").expect("read"), Some(Value::Bool(true)));
+            assert_eq!(sk.get(&r, "baz").expect("read"), Some(Value::Str("héllo, yöu")));
+        }
+
+        {
+            let mut writer = k.write().expect("writer");
+            sk.delete(&mut writer, "foo").expect("deleted");
+            sk.delete(&mut writer, "bar").expect("deleted");
+            sk.delete(&mut writer, "baz").expect("deleted");
+            assert_eq!(sk.get(&writer, "foo").expect("read"), None);
+            assert_eq!(sk.get(&writer, "bar").expect("read"), None);
+            assert_eq!(sk.get(&writer, "baz").expect("read"), None);
+
+            writer.commit().expect("committed");
+        }
+
+        // Committed. Reads will succeed but return None to indicate a missing value.
+        {
+            let r = k.read().unwrap();
+            assert_eq!(sk.get(&r, "foo").expect("read"), None);
+            assert_eq!(sk.get(&r, "bar").expect("read"), None);
+            assert_eq!(sk.get(&r, "baz").expect("read"), None);
+        }
+    }
+
+    #[test]
+    fn test_single_store_clear() {
+        let root = Builder::new().prefix("test_single_store_clear").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+
+        {
+            let mut writer = k.write().expect("writer");
+            sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+            sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
+            sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
+            writer.commit().expect("committed");
+        }
+
+        {
+            let mut writer = k.write().expect("writer");
+            sk.clear(&mut writer).expect("cleared");
+            writer.commit().expect("committed");
+        }
+
+        {
+            let r = k.read().unwrap();
+            let iter = sk.iter_start(&r).expect("iter");
+            assert_eq!(iter.count(), 0);
+        }
+    }
+
+    #[test]
+    fn test_multi_put_get_del() {
+        let root = Builder::new().prefix("test_multi_put_get_del").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let multistore = k.open_multi("multistore", StoreOptions::create()).unwrap();
+        let mut writer = k.write().unwrap();
+        multistore.put(&mut writer, "str1", &Value::Str("str1 foo")).unwrap();
+        multistore.put(&mut writer, "str1", &Value::Str("str1 bar")).unwrap();
+        multistore.put(&mut writer, "str2", &Value::Str("str2 foo")).unwrap();
+        multistore.put(&mut writer, "str2", &Value::Str("str2 bar")).unwrap();
+        multistore.put(&mut writer, "str3", &Value::Str("str3 foo")).unwrap();
+        multistore.put(&mut writer, "str3", &Value::Str("str3 bar")).unwrap();
+        writer.commit().unwrap();
+        let writer = k.write().unwrap();
+        {
+            let mut iter = multistore.get(&writer, "str1").unwrap();
+            let (id, val) = iter.next().unwrap().unwrap();
+            assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 bar"))));
+            let (id, val) = iter.next().unwrap().unwrap();
+            assert_eq!((id, val), (&b"str1"[..], Some(Value::Str("str1 foo"))));
+        }
+        writer.commit().unwrap();
+        let mut writer = k.write().unwrap();
+
+        multistore.delete(&mut writer, "str1", &Value::Str("str1 foo")).unwrap();
+        assert_eq!(multistore.get_first(&writer, "str1").unwrap(), Some(Value::Str("str1 bar")));
+
+        multistore.delete(&mut writer, "str2", &Value::Str("str2 bar")).unwrap();
+        assert_eq!(multistore.get_first(&writer, "str2").unwrap(), Some(Value::Str("str2 foo")));
+
+        multistore.delete_all(&mut writer, "str3").unwrap();
+        assert_eq!(multistore.get_first(&writer, "str3").unwrap(), None);
+        writer.commit().unwrap();
+    }
+
+    // Verify that MultiStore::clear removes every entry: after writing two
+    // values under each of three keys and clearing the store, get_first
+    // returns None for all keys. Each step runs in its own scoped write
+    // transaction so the previous commit is visible to the next.
+    #[test]
+    fn test_multiple_store_clear() {
+        let root = Builder::new().prefix("test_multiple_store_clear").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+
+        let multistore = k.open_multi("multistore", StoreOptions::create()).expect("opened");
+
+        {
+            let mut writer = k.write().expect("writer");
+            multistore.put(&mut writer, "str1", &Value::Str("str1 foo")).unwrap();
+            multistore.put(&mut writer, "str1", &Value::Str("str1 bar")).unwrap();
+            multistore.put(&mut writer, "str2", &Value::Str("str2 foo")).unwrap();
+            multistore.put(&mut writer, "str2", &Value::Str("str2 bar")).unwrap();
+            multistore.put(&mut writer, "str3", &Value::Str("str3 foo")).unwrap();
+            multistore.put(&mut writer, "str3", &Value::Str("str3 bar")).unwrap();
+            writer.commit().expect("committed");
+        }
+
+        {
+            let mut writer = k.write().expect("writer");
+            multistore.clear(&mut writer).expect("cleared");
+            writer.commit().expect("committed");
+        }
+
+        {
+            let r = k.read().unwrap();
+            assert_eq!(multistore.get_first(&r, "str1").expect("read"), None);
+            assert_eq!(multistore.get_first(&r, "str2").expect("read"), None);
+            assert_eq!(multistore.get_first(&r, "str3").expect("read"), None);
+        }
+    }
+
+    // Verify that opening an already-created store read-only (StoreOptions::default,
+    // i.e. without the create flag) succeeds even while a write transaction on the
+    // same store is still open, and that reads through the read-only handle observe
+    // the committed write.
+    #[test]
+    fn test_open_store_for_read() {
+        let root = Builder::new().prefix("test_open_store_for_read").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        // First create the store, and start a write transaction on it.
+        let sk = k.open_single("sk", StoreOptions::create()).expect("opened");
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, "foo", &Value::Str("bar")).expect("write");
+
+        // Open the same store for read, note that the write transaction is still in progress,
+        // it should not block the reader though.
+        let sk_readonly = k.open_single("sk", StoreOptions::default()).expect("opened");
+        writer.commit().expect("commit");
+        // Now the write transaction is committed, any followed reads should see its change.
+        let reader = k.read().expect("reader");
+        assert_eq!(sk_readonly.get(&reader, "foo").expect("read"), Some(Value::Str("bar")));
+    }
+
+    // Verify that opening a store that was never created, with StoreOptions::default()
+    // (no create flag), fails. The expect() turns the Err into a panic whose message
+    // is matched by the should_panic attribute.
+    #[test]
+    #[should_panic(expected = "open a missing store")]
+    fn test_open_a_missing_store() {
+        let root = Builder::new().prefix("test_open_a_missing_store").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let _sk = k.open("sk", StoreOptions::default()).expect("open a missing store");
+    }
+
+    // Verify that attempting to open a store while a read transaction is live on the
+    // same thread yields StoreError::OpenAttemptedDuringTransaction rather than
+    // deadlocking or aborting the process (opening a store needs its own transaction
+    // internally, which LMDB disallows alongside the live reader on this thread).
+    #[test]
+    fn test_open_fail_with_badrslot() {
+        let root = Builder::new().prefix("test_open_fail_with_badrslot").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        // First create the store
+        let _sk = k.open_single("sk", StoreOptions::create()).expect("opened");
+        // Open a reader on this store
+        let _reader = k.read().expect("reader");
+        // Open the same store for read while the reader is in progress will panic
+        let store: Result<SingleStore, StoreError> = k.open_single("sk", StoreOptions::default());
+        match store {
+            Err(StoreError::OpenAttemptedDuringTransaction(_thread_id)) => (),
+            _ => panic!("should panic"),
+        }
+    }
+
+    // Verify the read-modify-write pattern for numeric values within a single write
+    // transaction: read through the writer, copy the primitive out (ending the
+    // borrow), then put the updated value back through the same writer.
+    #[test]
+    fn test_read_before_write_num() {
+        let root = Builder::new().prefix("test_read_before_write_num").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+
+        // Test reading a number, modifying it, and then writing it back.
+        // We have to be done with the Value::I64 before calling Writer::put,
+        // as the Value::I64 borrows an immutable reference to the Writer.
+        // So we extract and copy its primitive value.
+
+        // Returns the stored i64 for "foo", or None if absent or of another type.
+        fn get_existing_foo(writer: &Writer, store: SingleStore) -> Option<i64> {
+            match store.get(writer, "foo").expect("read") {
+                Some(Value::I64(val)) => Some(val),
+                _ => None,
+            }
+        }
+
+        let mut writer = k.write().expect("writer");
+        // The store is empty on the first (and only) run, so this starts at 99...
+        let mut existing = get_existing_foo(&writer, sk).unwrap_or(99);
+        existing += 1;
+        sk.put(&mut writer, "foo", &Value::I64(existing)).expect("success");
+
+        // ...and the uncommitted put is visible through the same writer as 100.
+        let updated = get_existing_foo(&writer, sk).unwrap_or(99);
+        assert_eq!(updated, 100);
+        writer.commit().expect("commit");
+    }
+
+    // Verify the read-modify-write pattern for string values within a single write
+    // transaction: the borrowed &str is copied into an owned String so the Writer
+    // can be mutably borrowed again for the put.
+    #[test]
+    fn test_read_before_write_str() {
+        let root = Builder::new().prefix("test_read_before_write_str").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+
+        // Test reading a string, modifying it, and then writing it back.
+        // We have to be done with the Value::Str before calling Writer::put,
+        // as the Value::Str (and its underlying &str) borrows an immutable
+        // reference to the Writer.  So we copy it to a String.
+
+        let mut writer = k.write().expect("writer");
+        let mut existing = match sk.get(&writer, "foo").expect("read") {
+            Some(Value::Str(val)) => val,
+            _ => "",
+        }
+        .to_string();
+        existing.push('…');
+        sk.put(&mut writer, "foo", &Value::Str(&existing)).expect("write");
+        writer.commit().expect("commit");
+    }
+
+    // Verify that a second read transaction on the same thread is rejected with
+    // StoreError::ReadTransactionAlreadyExists while the first is still alive,
+    // rather than silently succeeding or blocking.
+    #[test]
+    fn test_concurrent_read_transactions_prohibited() {
+        let root = Builder::new().prefix("test_concurrent_reads_prohibited").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+
+        let _first = k.read().expect("reader");
+        let second = k.read();
+
+        match second {
+            Err(StoreError::ReadTransactionAlreadyExists(t)) => {
+                println!("Thread was {:?}", t);
+            },
+            _ => {
+                panic!("Expected error.");
+            },
+        }
+    }
+
+    // Verify snapshot isolation: a long-lived reader keeps seeing the value that
+    // was committed when it started, even across a concurrent write and its commit;
+    // only a reader opened after the commit observes the new value.
+    #[test]
+    fn test_isolation() {
+        let root = Builder::new().prefix("test_isolation").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let s: SingleStore = k.open_single("s", StoreOptions::create()).expect("opened");
+
+        // Add one field.
+        {
+            let mut writer = k.write().expect("writer");
+            s.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+            writer.commit().expect("committed");
+        }
+
+        {
+            let reader = k.read().unwrap();
+            assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
+        }
+
+        // Establish a long-lived reader that outlasts a writer.
+        let reader = k.read().expect("reader");
+        assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
+
+        // Start a write transaction.
+        let mut writer = k.write().expect("writer");
+        s.put(&mut writer, "foo", &Value::I64(999)).expect("wrote");
+
+        // The reader and writer are isolated.
+        assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
+        assert_eq!(s.get(&writer, "foo").expect("read"), Some(Value::I64(999)));
+
+        // If we commit the writer, we still have isolation.
+        writer.commit().expect("committed");
+        assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
+
+        // A new reader sees the committed value. Note that LMDB doesn't allow two
+        // read transactions to exist in the same thread, so we abort the previous one.
+        reader.abort();
+        let reader = k.read().expect("reader");
+        assert_eq!(s.get(&reader, "foo").expect("read"), Some(Value::I64(999)));
+    }
+
+    // Verify round-tripping Value::Blob payloads, including [u16] data converted
+    // to/from its little-endian [u8] representation, all within one uncommitted
+    // write transaction (reads go through the writer).
+    #[test]
+    fn test_blob() {
+        let root = Builder::new().prefix("test_round_trip_blob").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+        let mut writer = k.write().expect("writer");
+
+        assert_eq!(sk.get(&writer, "foo").expect("read"), None);
+        sk.put(&mut writer, "foo", &Value::Blob(&[1, 2, 3, 4])).expect("wrote");
+        assert_eq!(sk.get(&writer, "foo").expect("read"), Some(Value::Blob(&[1, 2, 3, 4])));
+
+        // Widen [u16] into its little-endian byte representation.
+        fn u16_to_u8(src: &[u16]) -> Vec<u8> {
+            let mut dst = vec![0; 2 * src.len()];
+            LittleEndian::write_u16_into(src, &mut dst);
+            dst
+        }
+
+        // Inverse of u16_to_u8: reassemble u16s from little-endian bytes.
+        fn u8_to_u16(src: &[u8]) -> Vec<u16> {
+            let mut dst = vec![0; src.len() / 2];
+            LittleEndian::read_u16_into(src, &mut dst);
+            dst
+        }
+
+        // When storing UTF-16 strings as blobs, we'll need to convert
+        // their [u16] backing storage to [u8].  Test that converting, writing,
+        // reading, and converting back works as expected.
+        let u16_array = [1000, 10000, 54321, 65535];
+        assert_eq!(sk.get(&writer, "bar").expect("read"), None);
+        sk.put(&mut writer, "bar", &Value::Blob(&u16_to_u8(&u16_array))).expect("wrote");
+        let u8_array = match sk.get(&writer, "bar").expect("read") {
+            Some(Value::Blob(val)) => val,
+            _ => &[],
+        };
+        assert_eq!(u8_to_u16(u8_array), u16_array);
+    }
+
+    // Verify that with EnvironmentFlags::NO_SYNC an explicit sync(true) flushes
+    // committed data to disk: the environment is dropped (inner scope) and
+    // reopened from the same path, and the value is still readable.
+    #[test]
+    fn test_sync() {
+        let root = Builder::new().prefix("test_sync").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let mut builder = Rkv::environment_builder();
+        builder.set_max_dbs(1);
+        builder.set_flags(EnvironmentFlags::NO_SYNC);
+        {
+            let k = Rkv::from_env(root.path(), builder).expect("new succeeded");
+            let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+
+            {
+                let mut writer = k.write().expect("writer");
+                sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+                writer.commit().expect("committed");
+                k.sync(true).expect("synced");
+            }
+        }
+        let k = Rkv::from_env(root.path(), builder).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::default()).expect("opened");
+        let reader = k.read().expect("reader");
+        assert_eq!(sk.get(&reader, "foo").expect("read"), Some(Value::I64(1234)));
+    }
+
+    // Verify Rkv::stat() after creating five integer stores with one entry each.
+    // NOTE(review): entries() == 5 with five named stores suggests stat() reports
+    // the main (unnamed) database, whose entries are the sub-database records —
+    // confirm against Rkv::stat's implementation.
+    #[test]
+    fn test_stat() {
+        let root = Builder::new().prefix("test_stat").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        for i in 0..5 {
+            let sk: IntegerStore<u32> =
+                k.open_integer(&format!("sk{}", i)[..], StoreOptions::create()).expect("opened");
+            {
+                let mut writer = k.write().expect("writer");
+                sk.put(&mut writer, i, &Value::I64(i64::from(i))).expect("wrote");
+                writer.commit().expect("committed");
+            }
+        }
+        // With so little data everything fits in a single leaf page: depth 1,
+        // no branch pages.
+        assert_eq!(k.stat().expect("stat").depth(), 1);
+        assert_eq!(k.stat().expect("stat").entries(), 5);
+        assert_eq!(k.stat().expect("stat").branch_pages(), 0);
+        assert_eq!(k.stat().expect("stat").leaf_pages(), 1);
+    }
+
+    // Verify the environment info accessors (map size, last page/txn, reader
+    // limits) and that opening a reader bumps the live-reader count.
+    #[test]
+    fn test_info() {
+        let root = Builder::new().prefix("test_info").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+        let mut writer = k.write().expect("writer");
+
+        sk.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote");
+        writer.commit().expect("commited");
+
+        let info = k.info().expect("info");
+
+        // The default size is 1MB.
+        assert_eq!(info.map_size(), DEFAULT_SIZE);
+        // Should greater than 0 after the write txn.
+        assert!(info.last_pgno() > 0);
+        // A txn to open_single + a txn to write.
+        assert_eq!(info.last_txnid(), 2);
+        // The default max readers is 126.
+        assert_eq!(info.max_readers(), 126);
+        assert_eq!(info.num_readers(), 0);
+
+        // A new reader should increment the reader counter.
+        let _reader = k.read().expect("reader");
+        let info = k.info().expect("info");
+
+        assert_eq!(info.num_readers(), 1);
+    }
+
+    // Verify load_ratio() moves in the expected direction: it grows when data is
+    // added and shrinks after the store is cleared (freed pages return to the
+    // freelist). Only relative ordering is asserted, not exact values.
+    #[test]
+    fn test_load_ratio() {
+        let root = Builder::new().prefix("test_load_ratio").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote");
+        writer.commit().expect("commited");
+
+        let ratio = k.load_ratio().expect("ratio");
+        assert!(ratio > 0.0_f32 && ratio < 1.0_f32);
+
+        // Put data to database should increase the load ratio.
+        // The value is >4KB (one page) to guarantee extra pages are used.
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, "bar", &Value::Str(&"more-than-4KB".repeat(1000))).expect("wrote");
+        writer.commit().expect("commited");
+        let new_ratio = k.load_ratio().expect("ratio");
+        assert!(new_ratio > ratio);
+
+        // Clear the database so that all the used pages should go to freelist, hence the ratio
+        // should decrease.
+        let mut writer = k.write().expect("writer");
+        sk.clear(&mut writer).expect("clear");
+        writer.commit().expect("commited");
+        let after_clear_ratio = k.load_ratio().expect("ratio");
+        assert!(after_clear_ratio < new_ratio);
+    }
+
+    // Verify that set_map_size grows the memory map from the default, that the
+    // new size is reported by info(), and that writes still succeed afterwards.
+    #[test]
+    fn test_set_map_size() {
+        let root = Builder::new().prefix("test_size_map_size").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+
+        assert_eq!(k.info().expect("info").map_size(), DEFAULT_SIZE);
+
+        k.set_map_size(2 * DEFAULT_SIZE).expect("resized");
+
+        // Should be able to write.
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote");
+        writer.commit().expect("commited");
+
+        assert_eq!(k.info().expect("info").map_size(), 2 * DEFAULT_SIZE);
+    }
+
+    // Verify iteration over a single store: an empty store yields nothing;
+    // iter_start returns (key, value) pairs in byte order of the keys (hence the
+    // multi-byte UTF-8 keys sorting after the ASCII ones); exhausted iterators
+    // stay exhausted; and iter_from seeks to the first key >= the given key,
+    // including when that key is a strict prefix of a stored key.
+    #[test]
+    fn test_iter() {
+        let root = Builder::new().prefix("test_iter").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+
+        // An iterator over an empty store returns no values.
+        {
+            let reader = k.read().unwrap();
+            let mut iter = sk.iter_start(&reader).unwrap();
+            assert!(iter.next().is_none());
+        }
+
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+        sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote");
+        sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
+        sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
+        sk.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote");
+        sk.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote");
+        writer.commit().expect("committed");
+
+        let reader = k.read().unwrap();
+
+        // Reader.iter() returns (key, value) tuples ordered by key.
+        let mut iter = sk.iter_start(&reader).unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "bar");
+        assert_eq!(val, Some(Value::Bool(true)));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "baz");
+        assert_eq!(val, Some(Value::Str("héllo, yöu")));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "foo");
+        assert_eq!(val, Some(Value::I64(1234)));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
+        assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+
+        // Iterators don't loop.  Once one returns None, additional calls
+        // to its next() method will always return None.
+        assert!(iter.next().is_none());
+
+        // Reader.iter_from() begins iteration at the first key equal to
+        // or greater than the given key.
+        let mut iter = sk.iter_from(&reader, "moo").unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+
+        // Reader.iter_from() works as expected when the given key is a prefix
+        // of a key in the store.
+        let mut iter = sk.iter_from(&reader, "no").unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+    }
+
+    // Verify that iter_from with a key greater than every stored key ("nuu" sorts
+    // after "noo", the largest key) yields an immediately-exhausted iterator
+    // rather than an error.
+    #[test]
+    fn test_iter_from_key_greater_than_existing() {
+        let root = Builder::new().prefix("test_iter_from_key_greater_than_existing").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let sk: SingleStore = k.open_single("sk", StoreOptions::create()).expect("opened");
+
+        let mut writer = k.write().expect("writer");
+        sk.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+        sk.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote");
+        sk.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
+        sk.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
+        writer.commit().expect("committed");
+
+        let reader = k.read().unwrap();
+        let mut iter = sk.iter_from(&reader, "nuu").unwrap();
+        assert!(iter.next().is_none());
+    }
+
+    // Verify that three stores in one environment are independent: the same key
+    // holds a different typed value in each, reads work through both the writer
+    // (pre-commit) and a reader (post-commit), and deletes apply per store.
+    #[test]
+    fn test_multiple_store_read_write() {
+        let root = Builder::new().prefix("test_multiple_store_read_write").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+
+        let s1: SingleStore = k.open_single("store_1", StoreOptions::create()).expect("opened");
+        let s2: SingleStore = k.open_single("store_2", StoreOptions::create()).expect("opened");
+        let s3: SingleStore = k.open_single("store_3", StoreOptions::create()).expect("opened");
+
+        let mut writer = k.write().expect("writer");
+        s1.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote");
+        s2.put(&mut writer, "foo", &Value::I64(123)).expect("wrote");
+        s3.put(&mut writer, "foo", &Value::Bool(true)).expect("wrote");
+
+        assert_eq!(s1.get(&writer, "foo").expect("read"), Some(Value::Str("bar")));
+        assert_eq!(s2.get(&writer, "foo").expect("read"), Some(Value::I64(123)));
+        assert_eq!(s3.get(&writer, "foo").expect("read"), Some(Value::Bool(true)));
+
+        writer.commit().expect("committed");
+
+        let reader = k.read().expect("unbound_reader");
+        assert_eq!(s1.get(&reader, "foo").expect("read"), Some(Value::Str("bar")));
+        assert_eq!(s2.get(&reader, "foo").expect("read"), Some(Value::I64(123)));
+        assert_eq!(s3.get(&reader, "foo").expect("read"), Some(Value::Bool(true)));
+        reader.abort();
+
+        // test delete across multiple stores
+        let mut writer = k.write().expect("writer");
+        s1.delete(&mut writer, "foo").expect("deleted");
+        s2.delete(&mut writer, "foo").expect("deleted");
+        s3.delete(&mut writer, "foo").expect("deleted");
+        writer.commit().expect("committed");
+
+        // NOTE(review): these assertions read the never-written key "key";
+        // checking the just-deleted "foo" may have been intended upstream.
+        let reader = k.read().expect("reader");
+        assert_eq!(s1.get(&reader, "key").expect("value"), None);
+        assert_eq!(s2.get(&reader, "key").expect("value"), None);
+        assert_eq!(s3.get(&reader, "key").expect("value"), None);
+    }
+
+    // Verify that iter_start and iter_from behave identically and independently
+    // on two stores in the same environment, each holding the same six keys:
+    // full iteration in byte order of the keys, plus seeks from an absent key
+    // ("moo") and from a key prefix ("no").
+    #[test]
+    fn test_multiple_store_iter() {
+        let root = Builder::new().prefix("test_multiple_store_iter").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let s1: SingleStore = k.open_single("store_1", StoreOptions::create()).expect("opened");
+        let s2: SingleStore = k.open_single("store_2", StoreOptions::create()).expect("opened");
+
+        let mut writer = k.write().expect("writer");
+        // Write to "s1"
+        s1.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+        s1.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote");
+        s1.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
+        s1.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
+        s1.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote");
+        s1.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote");
+        // Write to "s2"
+        s2.put(&mut writer, "foo", &Value::I64(1234)).expect("wrote");
+        s2.put(&mut writer, "noo", &Value::F64(1234.0.into())).expect("wrote");
+        s2.put(&mut writer, "bar", &Value::Bool(true)).expect("wrote");
+        s2.put(&mut writer, "baz", &Value::Str("héllo, yöu")).expect("wrote");
+        s2.put(&mut writer, "héllò, töűrîst", &Value::Str("Emil.RuleZ!")).expect("wrote");
+        s2.put(&mut writer, "你好,遊客", &Value::Str("米克規則")).expect("wrote");
+        writer.commit().expect("committed");
+
+        let reader = k.read().unwrap();
+
+        // Iterate through the whole store in "s1"
+        let mut iter = s1.iter_start(&reader).unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "bar");
+        assert_eq!(val, Some(Value::Bool(true)));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "baz");
+        assert_eq!(val, Some(Value::Str("héllo, yöu")));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "foo");
+        assert_eq!(val, Some(Value::I64(1234)));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
+        assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+
+        // Iterate through the whole store in "s2"
+        let mut iter = s2.iter_start(&reader).unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "bar");
+        assert_eq!(val, Some(Value::Bool(true)));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "baz");
+        assert_eq!(val, Some(Value::Str("héllo, yöu")));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "foo");
+        assert_eq!(val, Some(Value::I64(1234)));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "héllò, töűrîst");
+        assert_eq!(val, Some(Value::Str("Emil.RuleZ!")));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+
+        // Iterate from a given key in "s1"
+        let mut iter = s1.iter_from(&reader, "moo").unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+
+        // Iterate from a given key in "s2"
+        let mut iter = s2.iter_from(&reader, "moo").unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+
+        // Iterate from a given prefix in "s1"
+        let mut iter = s1.iter_from(&reader, "no").unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+
+        // Iterate from a given prefix in "s2"
+        let mut iter = s2.iter_from(&reader, "no").unwrap();
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "noo");
+        assert_eq!(val, Some(Value::F64(1234.0.into())));
+        let (key, val) = iter.next().unwrap().unwrap();
+        assert_eq!(str::from_utf8(key).expect("key"), "你好,遊客");
+        assert_eq!(val, Some(Value::Str("米克規則")));
+        assert!(iter.next().is_none());
+    }
+
+    // Verify that a store handle (Copy) can be shared across threads: ten writer
+    // threads each commit one (i, U64(i)) pair, serialized by an RwLock around the
+    // Rkv environment, then ten reader threads each read one pair back; the sum of
+    // values read must equal the sum written.
+    #[test]
+    fn test_store_multiple_thread() {
+        let root = Builder::new().prefix("test_multiple_thread").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let rkv_arc = Arc::new(RwLock::new(Rkv::new(root.path()).expect("new succeeded")));
+        let store = rkv_arc.read().unwrap().open_single("test", StoreOptions::create()).expect("opened");
+
+        let num_threads = 10;
+        let mut write_handles = Vec::with_capacity(num_threads as usize);
+        let mut read_handles = Vec::with_capacity(num_threads as usize);
+
+        // Note that this isn't intended to demonstrate a good use of threads.
+        // For this shape of data, it would be more performant to write/read
+        // all values using one transaction in a single thread. The point here
+        // is just to confirm that a store can be shared by multiple threads.
+
+        // For each KV pair, spawn a thread that writes it to the store.
+        for i in 0..num_threads {
+            let rkv_arc = rkv_arc.clone();
+            write_handles.push(thread::spawn(move || {
+                // Exclusive lock on the environment serializes the writers.
+                let rkv = rkv_arc.write().expect("rkv");
+                let mut writer = rkv.write().expect("writer");
+                store.put(&mut writer, i.to_string(), &Value::U64(i)).expect("written");
+                writer.commit().unwrap();
+            }));
+        }
+        // All writes must finish (and commit) before any reads start.
+        for handle in write_handles {
+            handle.join().expect("joined");
+        }
+
+        // For each KV pair, spawn a thread that reads it from the store
+        // and returns its value.
+        for i in 0..num_threads {
+            let rkv_arc = rkv_arc.clone();
+            read_handles.push(thread::spawn(move || {
+                let rkv = rkv_arc.read().expect("rkv");
+                let reader = rkv.read().expect("reader");
+                let value = match store.get(&reader, i.to_string()) {
+                    Ok(Some(Value::U64(value))) => value,
+                    Ok(Some(_)) => panic!("value type unexpected"),
+                    Ok(None) => panic!("value not found"),
+                    Err(err) => panic!(err),
+                };
+                assert_eq!(value, i);
+                value
+            }));
+        }
+
+        // Sum the values returned from the threads and confirm that they're
+        // equal to the sum of values written to the threads.
+        let thread_sum: u64 = read_handles.into_iter().map(|handle| handle.join().expect("value")).sum();
+        assert_eq!(thread_sum, (0..num_threads).sum());
+    }
+
+    // Verify several borrow-checker-safe patterns for using a retrieved value as
+    // a subsequent key: via a Reader then a Writer; via one Writer with the value
+    // copied to an owned String; and across multiple stores, collecting owned
+    // foreign keys before deleting them.
+    #[test]
+    fn test_use_value_as_key() {
+        let root = Builder::new().prefix("test_use_value_as_key").tempdir().expect("tempdir");
+        let rkv = Rkv::new(root.path()).expect("new succeeded");
+        let store = rkv.open_single("store", StoreOptions::create()).expect("opened");
+
+        {
+            let mut writer = rkv.write().expect("writer");
+            store.put(&mut writer, "foo", &Value::Str("bar")).expect("wrote");
+            store.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote");
+            writer.commit().expect("committed");
+        }
+
+        // It's possible to retrieve a value with a Reader and then use it
+        // as a key with a Writer.
+        {
+            let reader = &rkv.read().unwrap();
+            if let Some(Value::Str(key)) = store.get(reader, "foo").expect("read") {
+                let mut writer = rkv.write().expect("writer");
+                store.delete(&mut writer, key).expect("deleted");
+                writer.commit().expect("committed");
+            }
+        }
+
+        // Restore "bar", which the previous step deleted (foo's value is "bar").
+        {
+            let mut writer = rkv.write().expect("writer");
+            store.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote");
+            writer.commit().expect("committed");
+        }
+
+        // You can also retrieve a Value with a Writer and then use it as a key
+        // with the same Writer if you copy the value to an owned type
+        // so the Writer isn't still being borrowed by the retrieved value
+        // when you try to borrow the Writer again to modify that value.
+        {
+            let mut writer = rkv.write().expect("writer");
+            if let Some(Value::Str(value)) = store.get(&writer, "foo").expect("read") {
+                let key = value.to_owned();
+                store.delete(&mut writer, key).expect("deleted");
+                writer.commit().expect("committed");
+            }
+        }
+
+        {
+            let name1 = rkv.open_single("name1", StoreOptions::create()).expect("opened");
+            let name2 = rkv.open_single("name2", StoreOptions::create()).expect("opened");
+            let mut writer = rkv.write().expect("writer");
+            name1.put(&mut writer, "key1", &Value::Str("bar")).expect("wrote");
+            name1.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote");
+            name2.put(&mut writer, "key2", &Value::Str("bar")).expect("wrote");
+            name2.put(&mut writer, "bar", &Value::Str("baz")).expect("wrote");
+            writer.commit().expect("committed");
+        }
+
+        // You can also iterate (store, key) pairs to retrieve foreign keys,
+        // then iterate those foreign keys to modify/delete them.
+        //
+        // You need to open the stores in advance, since opening a store
+        // uses a write transaction internally, so opening them while a writer
+        // is extant will hang.
+        //
+        // And you need to copy the values to an owned type so the Writer isn't
+        // still being borrowed by a retrieved value when you try to borrow
+        // the Writer again to modify another value.
+        let fields = vec![
+            (rkv.open_single("name1", StoreOptions::create()).expect("opened"), "key1"),
+            (rkv.open_single("name2", StoreOptions::create()).expect("opened"), "key2"),
+        ];
+        {
+            let mut foreignkeys = Vec::new();
+            let mut writer = rkv.write().expect("writer");
+            for (store, key) in fields.iter() {
+                if let Some(Value::Str(value)) = store.get(&writer, key).expect("read") {
+                    foreignkeys.push((store, value.to_owned()));
+                }
+            }
+            for (store, key) in foreignkeys.iter() {
+                store.delete(&mut writer, key).expect("deleted");
+            }
+            writer.commit().expect("committed");
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/error.rs
@@ -0,0 +1,190 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use std::path::PathBuf;
+
+use bincode;
+use failure::Fail;
+use lmdb;
+
+use crate::value::Type;
+
+#[derive(Debug, Fail)]
+pub enum DataError {
+    #[fail(display = "unknown type tag: {}", _0)]
+    UnknownType(u8),
+
+    #[fail(display = "unexpected type tag: expected {}, got {}", expected, actual)]
+    UnexpectedType {
+        expected: Type,
+        actual: Type,
+    },
+
+    #[fail(display = "empty data; expected tag")]
+    Empty,
+
+    #[fail(display = "invalid value for type {}: {}", value_type, err)]
+    DecodingError {
+        value_type: Type,
+        err: Box<bincode::ErrorKind>,
+    },
+
+    #[fail(display = "couldn't encode value: {}", _0)]
+    EncodingError(Box<bincode::ErrorKind>),
+
+    #[fail(display = "invalid uuid bytes")]
+    InvalidUuid,
+}
+
+impl From<Box<bincode::ErrorKind>> for DataError {
+    fn from(e: Box<bincode::ErrorKind>) -> DataError {
+        DataError::EncodingError(e)
+    }
+}
+
+#[derive(Debug, Fail)]
+pub enum StoreError {
+    #[fail(display = "I/O error: {:?}", _0)]
+    IoError(::std::io::Error),
+
+    #[fail(display = "directory does not exist or not a directory: {:?}", _0)]
+    DirectoryDoesNotExistError(PathBuf),
+
+    #[fail(display = "data error: {:?}", _0)]
+    DataError(DataError),
+
+    #[fail(display = "lmdb error: {}", _0)]
+    LmdbError(lmdb::Error),
+
+    #[fail(display = "read transaction already exists in thread {:?}", _0)]
+    ReadTransactionAlreadyExists(::std::thread::ThreadId),
+
+    #[fail(display = "attempted to open DB during transaction in thread {:?}", _0)]
+    OpenAttemptedDuringTransaction(::std::thread::ThreadId),
+}
+
+impl StoreError {
+    pub fn open_during_transaction() -> StoreError {
+        StoreError::OpenAttemptedDuringTransaction(::std::thread::current().id())
+    }
+}
+
+impl From<lmdb::Error> for StoreError {
+    fn from(e: lmdb::Error) -> StoreError {
+        match e {
+            lmdb::Error::BadRslot => StoreError::ReadTransactionAlreadyExists(::std::thread::current().id()),
+            e => StoreError::LmdbError(e),
+        }
+    }
+}
+
+impl From<DataError> for StoreError {
+    fn from(e: DataError) -> StoreError {
+        StoreError::DataError(e)
+    }
+}
+
+impl From<::std::io::Error> for StoreError {
+    fn from(e: ::std::io::Error) -> StoreError {
+        StoreError::IoError(e)
+    }
+}
+
+#[derive(Debug, Fail)]
+pub enum MigrateError {
+    #[fail(display = "database not found: {:?}", _0)]
+    DatabaseNotFound(String),
+
+    #[fail(display = "{}", _0)]
+    FromString(String),
+
+    #[fail(display = "couldn't determine bit depth")]
+    IndeterminateBitDepth,
+
+    #[fail(display = "I/O error: {:?}", _0)]
+    IoError(::std::io::Error),
+
+    #[fail(display = "invalid DatabaseFlags bits")]
+    InvalidDatabaseBits,
+
+    #[fail(display = "invalid data version")]
+    InvalidDataVersion,
+
+    #[fail(display = "invalid magic number")]
+    InvalidMagicNum,
+
+    #[fail(display = "invalid NodeFlags bits")]
+    InvalidNodeBits,
+
+    #[fail(display = "invalid PageFlags bits")]
+    InvalidPageBits,
+
+    #[fail(display = "invalid page number")]
+    InvalidPageNum,
+
+    #[fail(display = "lmdb error: {}", _0)]
+    LmdbError(lmdb::Error),
+
+    #[fail(display = "string conversion error")]
+    StringConversionError,
+
+    #[fail(display = "TryFromInt error: {:?}", _0)]
+    TryFromIntError(::std::num::TryFromIntError),
+
+    #[fail(display = "unexpected Page variant")]
+    UnexpectedPageVariant,
+
+    #[fail(display = "unexpected PageHeader variant")]
+    UnexpectedPageHeaderVariant,
+
+    #[fail(display = "unsupported PageHeader variant")]
+    UnsupportedPageHeaderVariant,
+
+    #[fail(display = "UTF8 error: {:?}", _0)]
+    Utf8Error(::std::str::Utf8Error),
+}
+
+impl From<::std::io::Error> for MigrateError {
+    fn from(e: ::std::io::Error) -> MigrateError {
+        MigrateError::IoError(e)
+    }
+}
+
+impl From<::std::str::Utf8Error> for MigrateError {
+    fn from(e: ::std::str::Utf8Error) -> MigrateError {
+        MigrateError::Utf8Error(e)
+    }
+}
+
+impl From<::std::num::TryFromIntError> for MigrateError {
+    fn from(e: ::std::num::TryFromIntError) -> MigrateError {
+        MigrateError::TryFromIntError(e)
+    }
+}
+
+impl From<&str> for MigrateError {
+    fn from(e: &str) -> MigrateError {
+        MigrateError::FromString(e.to_string())
+    }
+}
+
+impl From<String> for MigrateError {
+    fn from(e: String) -> MigrateError {
+        MigrateError::FromString(e)
+    }
+}
+
+impl From<lmdb::Error> for MigrateError {
+    fn from(e: lmdb::Error) -> MigrateError {
+        match e {
+            e => MigrateError::LmdbError(e),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/lib.rs
@@ -0,0 +1,260 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//! a simple, humane, typed Rust interface to [LMDB](http://www.lmdb.tech/doc/)
+//!
+//! It aims to achieve the following:
+//!
+//! - Avoid LMDB's sharp edges (e.g., obscure error codes for common situations).
+//! - Report errors via [failure](https://docs.rs/failure/).
+//! - Correctly restrict access to one handle per process via a [Manager](struct.Manager.html).
+//! - Use Rust's type system to make single-typed key stores (including LMDB's own integer-keyed stores)
+//!   safe and ergonomic.
+//! - Encode and decode values via [bincode](https://docs.rs/bincode/)/[serde](https://docs.rs/serde/)
+//!   and type tags, achieving platform-independent storage and input/output flexibility.
+//!
+//! It exposes these primary abstractions:
+//!
+//! - [Manager](struct.Manager.html): a singleton that controls access to LMDB environments
+//! - [Rkv](struct.Rkv.html): an LMDB environment that contains a set of key/value databases
+//! - [SingleStore](store/single/struct.SingleStore.html): an LMDB database that contains a set of key/value pairs
+//!
+//! Keys can be anything that implements `AsRef<[u8]>` or integers
+//! (when accessing an [IntegerStore](store/integer/struct.IntegerStore.html)).
+//! Values can be any of the types defined by the [Value](value/enum.Value.html) enum, including:
+//!
+//! - booleans (`Value::Bool`)
+//! - integers (`Value::I64`, `Value::U64`)
+//! - floats (`Value::F64`)
+//! - strings (`Value::Str`)
+//! - blobs (`Value::Blob`)
+//!
+//! See [Value](value/enum.Value.html) for the complete list of supported types.
+//!
+//! ## Basic Usage
+//! ```
+//! use rkv::{Manager, Rkv, SingleStore, Value, StoreOptions};
+//! use std::fs;
+//! use tempfile::Builder;
+//!
+//! // First determine the path to the environment, which is represented
+//! // on disk as a directory containing two files:
+//! //
+//! //   * a data file containing the key/value stores
+//! //   * a lock file containing metadata about current transactions
+//! //
+//! // In this example, we use the `tempfile` crate to create the directory.
+//! //
+//! let root = Builder::new().prefix("simple-db").tempdir().unwrap();
+//! fs::create_dir_all(root.path()).unwrap();
+//! let path = root.path();
+//!
+//! // The Manager enforces that each process opens the same environment
+//! // at most once by caching a handle to each environment that it opens.
+//! // Use it to retrieve the handle to an opened environment—or create one
+//! // if it hasn't already been opened:
+//! let created_arc = Manager::singleton().write().unwrap().get_or_create(path, Rkv::new).unwrap();
+//! let env = created_arc.read().unwrap();
+//!
+//! // Then you can use the environment handle to get a handle to a datastore:
+//! let store: SingleStore = env.open_single("mydb", StoreOptions::create()).unwrap();
+//!
+//! {
+//!     // Use a write transaction to mutate the store via a `Writer`.
+//!     // There can be only one writer for a given environment, so opening
+//!     // a second one will block until the first completes.
+//!     let mut writer = env.write().unwrap();
+//!
+//!     // Keys are `AsRef<[u8]>`, while values are `Value` enum instances.
+//!     // Use the `Blob` variant to store arbitrary collections of bytes.
+//!     // Putting data returns a `Result<(), StoreError>`, where StoreError
+//!     // is an enum identifying the reason for a failure.
+//!     store.put(&mut writer, "int", &Value::I64(1234)).unwrap();
+//!     store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap();
+//!     store.put(&mut writer, "float", &Value::F64(1234.0.into())).unwrap();
+//!     store.put(&mut writer, "instant", &Value::Instant(1528318073700)).unwrap();
+//!     store.put(&mut writer, "boolean", &Value::Bool(true)).unwrap();
+//!     store.put(&mut writer, "string", &Value::Str("Héllo, wörld!")).unwrap();
+//!     store.put(&mut writer, "json", &Value::Json(r#"{"foo":"bar", "number": 1}"#)).unwrap();
+//!     store.put(&mut writer, "blob", &Value::Blob(b"blob")).unwrap();
+//!
+//!     // You must commit a write transaction before the writer goes out
+//!     // of scope, or the transaction will abort and the data won't persist.
+//!     writer.commit().unwrap();
+//! }
+//!
+//! {
+//!     // Use a read transaction to query the store via a `Reader`.
+//!     // There can be multiple concurrent readers for a store, and readers
+//!     // never block on a writer nor other readers.
+//!     let reader = env.read().expect("reader");
+//!
+//!     // Keys are `AsRef<[u8]>`, and the return value is `Result<Option<Value>, StoreError>`.
+//!     println!("Get int {:?}", store.get(&reader, "int").unwrap());
+//!     println!("Get uint {:?}", store.get(&reader, "uint").unwrap());
+//!     println!("Get float {:?}", store.get(&reader, "float").unwrap());
+//!     println!("Get instant {:?}", store.get(&reader, "instant").unwrap());
+//!     println!("Get boolean {:?}", store.get(&reader, "boolean").unwrap());
+//!     println!("Get string {:?}", store.get(&reader, "string").unwrap());
+//!     println!("Get json {:?}", store.get(&reader, "json").unwrap());
+//!     println!("Get blob {:?}", store.get(&reader, "blob").unwrap());
+//!
+//!     // Retrieving a non-existent value returns `Ok(None)`.
+//!     println!("Get non-existent value {:?}", store.get(&reader, "non-existent").unwrap());
+//!
+//!     // A read transaction will automatically close once the reader
+//!     // goes out of scope, so it isn't necessary to close it explicitly,
+//!     // although you can do so by calling `Reader.abort()`.
+//! }
+//!
+//! {
+//!     // Aborting a write transaction rolls back the change(s).
+//!     let mut writer = env.write().unwrap();
+//!     store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+//!     writer.abort();
+//!     let reader = env.read().expect("reader");
+//!     println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
+//! }
+//!
+//! {
+//!     // Explicitly aborting a transaction is not required unless an early
+//!     // abort is desired, since both read and write transactions will
+//!     // implicitly be aborted once they go out of scope.
+//!     {
+//!         let mut writer = env.write().unwrap();
+//!         store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+//!     }
+//!     let reader = env.read().expect("reader");
+//!     println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
+//! }
+//!
+//! {
+//!     // Deleting a key/value pair also requires a write transaction.
+//!     let mut writer = env.write().unwrap();
+//!     store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+//!     store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
+//!     store.delete(&mut writer, "foo").unwrap();
+//!
+//!     // A write transaction also supports reading, and the version of the
+//!     // store that it reads includes the changes it has made regardless of
+//!     // the commit state of that transaction.
+//!
+//!     // In the code above, "foo" and "bar" were put into the store,
+//!     // then "foo" was deleted so only "bar" will return a result when the
+//!     // database is queried via the writer.
+//!     println!("It should be None! ({:?})", store.get(&writer, "foo").unwrap());
+//!     println!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
+//!
+//!     // But a reader won't see that change until the write transaction
+//!     // is committed.
+//!     {
+//!         let reader = env.read().expect("reader");
+//!         println!("Get foo {:?}", store.get(&reader, "foo").unwrap());
+//!         println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
+//!     }
+//!     writer.commit().unwrap();
+//!     {
+//!         let reader = env.read().expect("reader");
+//!         println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
+//!         println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
+//!     }
+//!
+//!     // Committing a transaction consumes the writer, preventing you
+//!     // from reusing it by failing at compile time with an error.
+//!     // This line would report error[E0382]: borrow of moved value: `writer`.
+//!     // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap();
+//! }
+//!
+//! {
+//!     // Clearing all the entries in the store with a write transaction.
+//!     {
+//!         let mut writer = env.write().unwrap();
+//!         store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
+//!         store.put(&mut writer, "bar", &Value::Str("baz")).unwrap();
+//!         writer.commit().unwrap();
+//!     }
+//!
+//!     {
+//!         let mut writer = env.write().unwrap();
+//!         store.clear(&mut writer).unwrap();
+//!         writer.commit().unwrap();
+//!     }
+//!
+//!     {
+//!         let reader = env.read().expect("reader");
+//!         println!("It should be None! ({:?})", store.get(&reader, "foo").unwrap());
+//!         println!("It should be None! ({:?})", store.get(&reader, "bar").unwrap());
+//!     }
+//!
+//! }
+//!
+//! ```
+
+#![allow(dead_code)]
+
+pub use lmdb::{
+    DatabaseFlags,
+    EnvironmentBuilder,
+    EnvironmentFlags,
+    WriteFlags,
+};
+
+mod env;
+pub mod error;
+mod manager;
+pub mod migrate;
+mod readwrite;
+pub mod store;
+pub mod value;
+
+pub use lmdb::{
+    Cursor,
+    Database,
+    Info,
+    Iter as LmdbIter,
+    RoCursor,
+    Stat,
+};
+
+pub use self::readwrite::{
+    Readable,
+    Reader,
+    Writer,
+};
+pub use self::store::integer::{
+    IntegerStore,
+    PrimitiveInt,
+};
+pub use self::store::integermulti::MultiIntegerStore;
+pub use self::store::multi::MultiStore;
+pub use self::store::single::SingleStore;
+pub use self::store::Options as StoreOptions;
+
+pub use self::env::Rkv;
+
+pub use self::error::{
+    DataError,
+    StoreError,
+};
+
+pub use self::manager::Manager;
+
+pub use self::value::{
+    OwnedValue,
+    Value,
+};
+
+fn read_transform(val: Result<&[u8], lmdb::Error>) -> Result<Option<Value>, StoreError> {
+    match val {
+        Ok(bytes) => Value::from_tagged_slice(bytes).map(Some).map_err(StoreError::DataError),
+        Err(lmdb::Error::NotFound) => Ok(None),
+        Err(e) => Err(StoreError::LmdbError(e)),
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/manager.rs
@@ -0,0 +1,196 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use lazy_static::lazy_static;
+use std::collections::BTreeMap;
+
+use std::io::{
+    self,
+    Error,
+    ErrorKind,
+};
+
+use std::collections::btree_map::Entry;
+
+use std::os::raw::c_uint;
+
+use std::path::{
+    Path,
+    PathBuf,
+};
+
+use std::sync::{
+    Arc,
+    RwLock,
+};
+
+use url::Url;
+
+use crate::error::StoreError;
+
+use crate::Rkv;
+
+lazy_static! {
+    /// A process is only permitted to have one open handle to each Rkv environment.
+    /// This manager exists to enforce that constraint: don't open environments directly.
+    static ref MANAGER: RwLock<Manager> = RwLock::new(Manager::new());
+}
+
+// Workaround the UNC path on Windows, see https://github.com/rust-lang/rust/issues/42869.
+// Otherwise, `Env::from_env()` will panic with error_no(123).
+fn canonicalize_path<'p, P>(path: P) -> io::Result<PathBuf>
+where
+    P: Into<&'p Path>,
+{
+    let canonical = path.into().canonicalize()?;
+    if cfg!(target_os = "windows") {
+        let url = Url::from_file_path(&canonical).map_err(|_e| Error::new(ErrorKind::Other, "URL passing error"))?;
+        return url.to_file_path().map_err(|_e| Error::new(ErrorKind::Other, "path canonicalization error"));
+    }
+    Ok(canonical)
+}
+
+/// A process is only permitted to have one open handle to each Rkv environment.
+/// This manager exists to enforce that constraint: don't open environments directly.
+pub struct Manager {
+    environments: BTreeMap<PathBuf, Arc<RwLock<Rkv>>>,
+}
+
+impl Manager {
+    fn new() -> Manager {
+        Manager {
+            environments: Default::default(),
+        }
+    }
+
+    pub fn singleton() -> &'static RwLock<Manager> {
+        &*MANAGER
+    }
+
+    /// Return the open env at `path`, returning `None` if it has not already been opened.
+    pub fn get<'p, P>(&self, path: P) -> Result<Option<Arc<RwLock<Rkv>>>, ::std::io::Error>
+    where
+        P: Into<&'p Path>,
+    {
+        let canonical = canonicalize_path(path)?;
+        Ok(self.environments.get(&canonical).cloned())
+    }
+
+    /// Return the open env at `path`, or create it by calling `f`.
+    pub fn get_or_create<'p, F, P>(&mut self, path: P, f: F) -> Result<Arc<RwLock<Rkv>>, StoreError>
+    where
+        F: FnOnce(&Path) -> Result<Rkv, StoreError>,
+        P: Into<&'p Path>,
+    {
+        let canonical = canonicalize_path(path)?;
+        Ok(match self.environments.entry(canonical) {
+            Entry::Occupied(e) => e.get().clone(),
+            Entry::Vacant(e) => {
+                let k = Arc::new(RwLock::new(f(e.key().as_path())?));
+                e.insert(k).clone()
+            },
+        })
+    }
+
+    /// Return the open env at `path` with capacity `capacity`,
+    /// or create it by calling `f`.
+    pub fn get_or_create_with_capacity<'p, F, P>(
+        &mut self,
+        path: P,
+        capacity: c_uint,
+        f: F,
+    ) -> Result<Arc<RwLock<Rkv>>, StoreError>
+    where
+        F: FnOnce(&Path, c_uint) -> Result<Rkv, StoreError>,
+        P: Into<&'p Path>,
+    {
+        let canonical = canonicalize_path(path)?;
+        Ok(match self.environments.entry(canonical) {
+            Entry::Occupied(e) => e.get().clone(),
+            Entry::Vacant(e) => {
+                let k = Arc::new(RwLock::new(f(e.key().as_path(), capacity)?));
+                e.insert(k).clone()
+            },
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::fs;
+    use tempfile::Builder;
+
+    use super::*;
+
+    /// Test that the manager will return the same Rkv instance each time for each path.
+    #[test]
+    fn test_same() {
+        let root = Builder::new().prefix("test_same").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+
+        let mut manager = Manager::new();
+
+        let p = root.path();
+        assert!(manager.get(p).expect("success").is_none());
+
+        let created_arc = manager.get_or_create(p, Rkv::new).expect("created");
+        let fetched_arc = manager.get(p).expect("success").expect("existed");
+        assert!(Arc::ptr_eq(&created_arc, &fetched_arc));
+    }
+
+    /// Test that one can mutate managed Rkv instances in surprising ways.
+    #[test]
+    fn test_mutate_managed_rkv() {
+        let mut manager = Manager::new();
+
+        let root1 = Builder::new().prefix("test_mutate_managed_rkv_1").tempdir().expect("tempdir");
+        fs::create_dir_all(root1.path()).expect("dir created");
+        let path1 = root1.path();
+        let arc = manager.get_or_create(path1, Rkv::new).expect("created");
+
+        // Arc<RwLock<>> has interior mutability, so we can replace arc's Rkv
+        // instance with a new instance that has a different path.
+        let root2 = Builder::new().prefix("test_mutate_managed_rkv_2").tempdir().expect("tempdir");
+        fs::create_dir_all(root2.path()).expect("dir created");
+        let path2 = root2.path();
+        {
+            let mut rkv = arc.write().expect("guard");
+            let rkv2 = Rkv::new(path2).expect("Rkv");
+            *rkv = rkv2;
+        }
+
+        // arc now has a different internal Rkv with path2, but it's still
+        // mapped to path1 in manager, so its pointer is equal to a new Arc
+        // for path1.
+        let path1_arc = manager.get(path1).expect("success").expect("existed");
+        assert!(Arc::ptr_eq(&path1_arc, &arc));
+
+        // Meanwhile, a new Arc for path2 has a different pointer, even though
+        // its Rkv's path is the same as arc's current path.
+        let path2_arc = manager.get_or_create(path2, Rkv::new).expect("success");
+        assert!(!Arc::ptr_eq(&path2_arc, &arc));
+    }
+
+    /// Test that the manager will return the same Rkv instance each time for each path.
+    #[test]
+    fn test_same_with_capacity() {
+        let root = Builder::new().prefix("test_same").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+
+        let mut manager = Manager::new();
+
+        let p = root.path();
+        assert!(manager.get(p).expect("success").is_none());
+
+        let created_arc = manager.get_or_create_with_capacity(p, 10, Rkv::with_capacity).expect("created");
+        let fetched_arc = manager.get(p).expect("success").expect("existed");
+        assert!(Arc::ptr_eq(&created_arc, &fetched_arc));
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/migrate.rs
@@ -0,0 +1,1028 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+//! A utility for migrating data from one LMDB environment to another.
+//! Notably, this tool can migrate data from an environment created with
+//! a different bit-depth than the current rkv consumer, which enables
+//! the consumer to retrieve data from an environment that can't be read
+//! directly using the rkv APIs.
+//!
+//! The utility supports both 32-bit and 64-bit LMDB source environments,
+//! and it automatically migrates data in both the default database
+//! and any named (sub) databases.  It also migrates the source environment's
+//! "map size" and "max DBs" configuration options to the destination
+//! environment.
+//!
+//! The destination environment must be at the rkv consumer's bit depth
+//! and should be empty of data.  It can be an empty directory, in which case
+//! the utility will create a new LMDB environment within the directory.
+//!
+//! The tool currently has these limitations:
+//!
+//! 1. It doesn't support migration from environments created with
+//!    `EnvironmentFlags::NO_SUB_DIR`.  To migrate such an environment,
+//!    create a temporary directory, copy the environment's data file
+//!    to a file called data.mdb in the temporary directory, then migrate
+//!    the temporary directory as the source environment.
+//! 2. It doesn't support migration from databases created with
+//!    `DatabaseFlags::DUP_SORT` (with or without `DatabaseFlags::DUP_FIXED`).
+//! 3. It doesn't account for existing data in the destination environment,
+//!    which means that it can overwrite data (causing data loss) or fail
+//!    to migrate data if the destination environment contains existing data.
+//!
+//! ## Basic Usage
+//!
+//! Call `Migrator::new()` with the path to the source environment to create
+//! a `Migrator` instance; then call the instance's `migrate()` method
+//! with the path to the destination environment to migrate data from the source
+//! to the destination environment.  For example, this snippet migrates data
+//! from the tests/envs/ref_env_32 environment to a new environment
+//! in a temporary directory:
+//!
+//! ```
+//! use rkv::migrate::Migrator;
+//! use std::path::Path;
+//! use tempfile::tempdir;
+//! let mut migrator = Migrator::new(Path::new("tests/envs/ref_env_32")).unwrap();
+//! migrator.migrate(&tempdir().unwrap().path()).unwrap();
+//! ```
+//!
+//! Both `Migrator::new()` and `migrate()` return a `MigrateResult` that is
+//! either an `Ok()` result or an `Err<MigrateError>`, where `MigrateError`
+//! is an enum whose variants identify specific kinds of migration failures.
+
+pub use crate::error::MigrateError;
+use bitflags::bitflags;
+use byteorder::{
+    LittleEndian,
+    ReadBytesExt,
+};
+use lmdb::{
+    DatabaseFlags,
+    Environment,
+    Transaction,
+    WriteFlags,
+};
+use std::{
+    collections::{
+        BTreeMap,
+        HashMap,
+    },
+    convert::TryFrom,
+    fs::File,
+    io::{
+        Cursor,
+        Read,
+        Seek,
+        SeekFrom,
+        Write,
+    },
+    path::{
+        Path,
+        PathBuf,
+    },
+    rc::Rc,
+    str,
+};
+
+const PAGESIZE: u16 = 4096;
+
+// The magic number is 0xBEEFC0DE, which is 0xDEC0EFBE in little-endian.
+// It appears at offset 12 on 32-bit systems and 16 on 64-bit systems.
+// We don't support big-endian migration, but presumably we could do so
+// by detecting the order of the bytes.
+const MAGIC: [u8; 4] = [0xDE, 0xC0, 0xEF, 0xBE];
+
+pub type MigrateResult<T> = Result<T, MigrateError>;
+
+bitflags! {
+    #[derive(Default)]
+    struct PageFlags: u16 {
+        const BRANCH = 0x01;
+        const LEAF = 0x02;
+        const OVERFLOW = 0x04;
+        const META = 0x08;
+        const DIRTY = 0x10;
+        const LEAF2 = 0x20;
+        const SUBP = 0x40;
+        const LOOSE = 0x4000;
+        const KEEP = 0x8000;
+    }
+}
+
+bitflags! {
+    #[derive(Default)]
+    struct NodeFlags: u16 {
+        const BIGDATA = 0x01;
+        const SUBDATA = 0x02;
+        const DUPDATA = 0x04;
+    }
+}
+
+// The bit depth of the executable that created an LMDB environment.
+// The Migrator determines this automatically based on the location of
+// the magic number in the data.mdb file.
+#[derive(Clone, Copy, PartialEq)]
+enum Bits {
+    U32,
+    U64,
+}
+
+impl Bits {
+    // The size of usize for the bit-depth represented by the enum variant.
+    fn size(self) -> usize {
+        match self {
+            Bits::U32 => 4,
+            Bits::U64 => 8,
+        }
+    }
+}
+
+// The equivalent of PAGEHDRSZ in LMDB, except that this one varies by bits.
+fn page_header_size(bits: Bits) -> u64 {
+    match bits {
+        Bits::U32 => 12,
+        Bits::U64 => 16,
+    }
+}
+
+// The equivalent of P_INVALID in LMDB, except that this one varies by bits.
+fn validate_page_num(page_num: u64, bits: Bits) -> MigrateResult<()> {
+    let invalid_page_num = match bits {
+        Bits::U32 => u64::from(!0u32),
+        Bits::U64 => !0u64,
+    };
+
+    if page_num == invalid_page_num {
+        return Err(MigrateError::InvalidPageNum);
+    }
+
+    Ok(())
+}
+
+#[derive(Clone, Debug, Default)]
+struct Database {
+    md_pad: u32,
+    md_flags: DatabaseFlags,
+    md_depth: u16,
+    md_branch_pages: u64,
+    md_leaf_pages: u64,
+    md_overflow_pages: u64,
+    md_entries: u64,
+    md_root: u64,
+}
+
+impl Database {
+    fn new(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<Database> {
+        Ok(Database {
+            md_pad: cursor.read_u32::<LittleEndian>()?,
+            md_flags: DatabaseFlags::from_bits(cursor.read_u16::<LittleEndian>()?.into())
+                .ok_or(MigrateError::InvalidDatabaseBits)?,
+            md_depth: cursor.read_u16::<LittleEndian>()?,
+            md_branch_pages: cursor.read_uint::<LittleEndian>(bits.size())?,
+            md_leaf_pages: cursor.read_uint::<LittleEndian>(bits.size())?,
+            md_overflow_pages: cursor.read_uint::<LittleEndian>(bits.size())?,
+            md_entries: cursor.read_uint::<LittleEndian>(bits.size())?,
+            md_root: cursor.read_uint::<LittleEndian>(bits.size())?,
+        })
+    }
+}
+
+#[derive(Debug, Default)]
+struct Databases {
+    free: Database,
+    main: Database,
+}
+
+#[derive(Debug, Default)]
+struct MetaData {
+    mm_magic: u32,
+    mm_version: u32,
+    mm_address: u64,
+    mm_mapsize: u64,
+    mm_dbs: Databases,
+    mm_last_pg: u64,
+    mm_txnid: u64,
+}
+
+#[derive(Debug)]
+enum LeafNode {
+    Regular {
+        mn_lo: u16,
+        mn_hi: u16,
+        mn_flags: NodeFlags,
+        mn_ksize: u16,
+        mv_size: u32,
+        key: Vec<u8>,
+        value: Vec<u8>,
+    },
+    BigData {
+        mn_lo: u16,
+        mn_hi: u16,
+        mn_flags: NodeFlags,
+        mn_ksize: u16,
+        mv_size: u32,
+        key: Vec<u8>,
+        overflow_pgno: u64,
+    },
+    SubData {
+        mn_lo: u16,
+        mn_hi: u16,
+        mn_flags: NodeFlags,
+        mn_ksize: u16,
+        mv_size: u32,
+        key: Vec<u8>,
+        value: Vec<u8>,
+        db: Database,
+    },
+}
+
+#[derive(Debug, Default)]
+struct BranchNode {
+    mp_pgno: u64,
+    mn_ksize: u16,
+    mn_data: Vec<u8>,
+}
+
+#[derive(Debug)]
+enum PageHeader {
+    Regular {
+        mp_pgno: u64,
+        mp_flags: PageFlags,
+        pb_lower: u16,
+        pb_upper: u16,
+    },
+    Overflow {
+        mp_pgno: u64,
+        mp_flags: PageFlags,
+        pb_pages: u32,
+    },
+}
+
+#[derive(Debug)]
+enum Page {
+    META(MetaData),
+    LEAF(Vec<LeafNode>),
+    BRANCH(Vec<BranchNode>),
+}
+
+impl Page {
+    /// Parse a raw page buffer into a typed `Page`, dispatching on the
+    /// flags found in the page header.  DUPFIXED/DUPSORT pages (LEAF2,
+    /// SUBP) and overflow pages are rejected with an error.
+    fn new(buf: Vec<u8>, bits: Bits) -> MigrateResult<Page> {
+        let mut cursor = std::io::Cursor::new(&buf[..]);
+
+        match Self::parse_page_header(&mut cursor, bits)? {
+            PageHeader::Regular {
+                mp_flags,
+                pb_lower,
+                ..
+            } => {
+                if mp_flags.contains(PageFlags::LEAF2) || mp_flags.contains(PageFlags::SUBP) {
+                    // We don't yet support DUPFIXED and DUPSORT databases.
+                    return Err(MigrateError::UnsupportedPageHeaderVariant);
+                }
+
+                if mp_flags.contains(PageFlags::META) {
+                    let meta_data = Self::parse_meta_data(&mut cursor, bits)?;
+                    Ok(Page::META(meta_data))
+                } else if mp_flags.contains(PageFlags::LEAF) {
+                    let nodes = Self::parse_leaf_nodes(&mut cursor, pb_lower, bits)?;
+                    Ok(Page::LEAF(nodes))
+                } else if mp_flags.contains(PageFlags::BRANCH) {
+                    let nodes = Self::parse_branch_nodes(&mut cursor, pb_lower, bits)?;
+                    Ok(Page::BRANCH(nodes))
+                } else {
+                    Err(MigrateError::UnexpectedPageHeaderVariant)
+                }
+            },
+            PageHeader::Overflow {
+                ..
+            } => {
+                // There isn't anything to do, nor should we try to instantiate
+                // a page of this type, as we only access them when reading
+                // a value that is too large to fit into a leaf node.
+                Err(MigrateError::UnexpectedPageHeaderVariant)
+            },
+        }
+    }
+
+    /// Read the common page header: page number (pointer-sized), a pad
+    /// word, and the flags; then either pb_pages (overflow pages) or
+    /// pb_lower/pb_upper (all other pages).  Leaves the cursor positioned
+    /// just past the header.
+    fn parse_page_header(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<PageHeader> {
+        let mp_pgno = cursor.read_uint::<LittleEndian>(bits.size())?;
+        let _mp_pad = cursor.read_u16::<LittleEndian>()?;
+        let mp_flags = PageFlags::from_bits(cursor.read_u16::<LittleEndian>()?).ok_or(MigrateError::InvalidPageBits)?;
+
+        if mp_flags.contains(PageFlags::OVERFLOW) {
+            let pb_pages = cursor.read_u32::<LittleEndian>()?;
+            Ok(PageHeader::Overflow {
+                mp_pgno,
+                mp_flags,
+                pb_pages,
+            })
+        } else {
+            let pb_lower = cursor.read_u16::<LittleEndian>()?;
+            let pb_upper = cursor.read_u16::<LittleEndian>()?;
+            Ok(PageHeader::Regular {
+                mp_pgno,
+                mp_flags,
+                pb_lower,
+                pb_upper,
+            })
+        }
+    }
+
+    /// Parse the MDB_meta structure that follows the page header on a
+    /// META page.  Pointer-sized fields are read with bits.size() so the
+    /// same code handles environments created by 32- and 64-bit builds.
+    fn parse_meta_data(mut cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<MetaData> {
+        cursor.seek(SeekFrom::Start(page_header_size(bits)))?;
+
+        Ok(MetaData {
+            mm_magic: cursor.read_u32::<LittleEndian>()?,
+            mm_version: cursor.read_u32::<LittleEndian>()?,
+            mm_address: cursor.read_uint::<LittleEndian>(bits.size())?,
+            mm_mapsize: cursor.read_uint::<LittleEndian>(bits.size())?,
+            mm_dbs: Databases {
+                free: Database::new(&mut cursor, bits)?,
+                main: Database::new(&mut cursor, bits)?,
+            },
+            mm_last_pg: cursor.read_uint::<LittleEndian>(bits.size())?,
+            mm_txnid: cursor.read_uint::<LittleEndian>(bits.size())?,
+        })
+    }
+
+    /// Read the mp_ptrs offset array that follows the page header, then
+    /// parse the leaf node each offset points at.
+    fn parse_leaf_nodes(cursor: &mut Cursor<&[u8]>, pb_lower: u16, bits: Bits) -> MigrateResult<Vec<LeafNode>> {
+        cursor.set_position(page_header_size(bits));
+        let num_keys = Self::num_keys(pb_lower, bits);
+        let mp_ptrs = Self::parse_mp_ptrs(cursor, num_keys)?;
+
+        let mut leaf_nodes = Vec::with_capacity(num_keys as usize);
+
+        for mp_ptr in mp_ptrs {
+            // Each mp_ptr is a byte offset from the start of the page.
+            cursor.set_position(u64::from(mp_ptr));
+            leaf_nodes.push(Self::parse_leaf_node(cursor, bits)?);
+        }
+
+        Ok(leaf_nodes)
+    }
+
+    /// Parse a single leaf node at the cursor's current position: the
+    /// fixed node header, then the key bytes, then — depending on the
+    /// node flags — an overflow page number (BIGDATA), a value that is a
+    /// serialized Database record (SUBDATA), or an inline value.
+    fn parse_leaf_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<LeafNode> {
+        // The order of the mn_lo and mn_hi fields is endian-dependent and would
+        // be reversed in an LMDB environment created on a big-endian system.
+        let mn_lo = cursor.read_u16::<LittleEndian>()?;
+        let mn_hi = cursor.read_u16::<LittleEndian>()?;
+
+        let mn_flags = NodeFlags::from_bits(cursor.read_u16::<LittleEndian>()?).ok_or(MigrateError::InvalidNodeBits)?;
+        let mn_ksize = cursor.read_u16::<LittleEndian>()?;
+
+        // Slice the key bytes directly out of the underlying buffer.
+        let start = usize::try_from(cursor.position())?;
+        let end = usize::try_from(cursor.position() + u64::from(mn_ksize))?;
+        let key = cursor.get_ref()[start..end].to_vec();
+        cursor.set_position(end as u64);
+
+        let mv_size = Self::leaf_node_size(mn_lo, mn_hi);
+        if mn_flags.contains(NodeFlags::BIGDATA) {
+            let overflow_pgno = cursor.read_uint::<LittleEndian>(bits.size())?;
+
+            Ok(LeafNode::BigData {
+                mn_lo,
+                mn_hi,
+                mn_flags,
+                mn_ksize,
+                mv_size,
+                key,
+                overflow_pgno,
+            })
+        } else if mn_flags.contains(NodeFlags::SUBDATA) {
+            // The value bytes are themselves a serialized Database record;
+            // parse it with a fresh cursor over just those bytes.
+            let start = usize::try_from(cursor.position())?;
+            let end = usize::try_from(cursor.position() + u64::from(mv_size))?;
+            let value = cursor.get_ref()[start..end].to_vec();
+            let mut cursor = std::io::Cursor::new(&value[..]);
+            let db = Database::new(&mut cursor, bits)?;
+            validate_page_num(db.md_root, bits)?;
+
+            Ok(LeafNode::SubData {
+                mn_lo,
+                mn_hi,
+                mn_flags,
+                mn_ksize,
+                mv_size,
+                key,
+                value,
+                db,
+            })
+        } else {
+            let start = usize::try_from(cursor.position())?;
+            let end = usize::try_from(cursor.position() + u64::from(mv_size))?;
+            let value = cursor.get_ref()[start..end].to_vec();
+
+            Ok(LeafNode::Regular {
+                mn_lo,
+                mn_hi,
+                mn_flags,
+                mn_ksize,
+                mv_size,
+                key,
+                value,
+            })
+        }
+    }
+
+    // The node's data size is split across two u16 fields; recombine them
+    // into the 32-bit value size.
+    fn leaf_node_size(mn_lo: u16, mn_hi: u16) -> u32 {
+        u32::from(mn_lo) + ((u32::from(mn_hi)) << 16)
+    }
+
+    /// Read the mp_ptrs offset array, then parse the branch node each
+    /// offset points at.
+    /// NOTE(review): unlike parse_leaf_nodes, this does not reset the
+    /// cursor to page_header_size(bits) first; it relies on the cursor
+    /// being left just past the header by parse_page_header — TODO confirm
+    /// those positions are always identical for regular pages.
+    fn parse_branch_nodes(cursor: &mut Cursor<&[u8]>, pb_lower: u16, bits: Bits) -> MigrateResult<Vec<BranchNode>> {
+        let num_keys = Self::num_keys(pb_lower, bits);
+        let mp_ptrs = Self::parse_mp_ptrs(cursor, num_keys)?;
+
+        let mut branch_nodes = Vec::with_capacity(num_keys as usize);
+
+        for mp_ptr in mp_ptrs {
+            cursor.set_position(u64::from(mp_ptr));
+            branch_nodes.push(Self::parse_branch_node(cursor, bits)?)
+        }
+
+        Ok(branch_nodes)
+    }
+
+    /// Parse a single branch node: the overloaded lo/hi/flags fields that
+    /// encode the child page number, followed by the key size and bytes.
+    fn parse_branch_node(cursor: &mut Cursor<&[u8]>, bits: Bits) -> MigrateResult<BranchNode> {
+        // The order of the mn_lo and mn_hi fields is endian-dependent and would
+        // be reversed in an LMDB environment created on a big-endian system.
+        let mn_lo = cursor.read_u16::<LittleEndian>()?;
+        let mn_hi = cursor.read_u16::<LittleEndian>()?;
+
+        let mn_flags = cursor.read_u16::<LittleEndian>()?;
+
+        // Branch nodes overload the mn_lo, mn_hi, and mn_flags fields
+        // to store the page number, so we derive the number from those fields.
+        let mp_pgno = Self::branch_node_page_num(mn_lo, mn_hi, mn_flags, bits);
+
+        let mn_ksize = cursor.read_u16::<LittleEndian>()?;
+
+        let position = cursor.position();
+        let start = usize::try_from(position)?;
+        let end = usize::try_from(position + u64::from(mn_ksize))?;
+        let mn_data = cursor.get_ref()[start..end].to_vec();
+        cursor.set_position(end as u64);
+
+        Ok(BranchNode {
+            mp_pgno,
+            mn_ksize,
+            mn_data,
+        })
+    }
+
+    // Reassemble the child page number: mn_lo/mn_hi form the low 32 bits;
+    // on 64-bit environments mn_flags carries bits 32..48.
+    fn branch_node_page_num(mn_lo: u16, mn_hi: u16, mn_flags: u16, bits: Bits) -> u64 {
+        let mut page_num = u64::from(u32::from(mn_lo) + (u32::from(mn_hi) << 16));
+        if bits == Bits::U64 {
+            page_num += u64::from(mn_flags) << 32;
+        }
+        page_num
+    }
+
+    // Read num_keys little-endian u16 offsets (the mp_ptrs array).
+    fn parse_mp_ptrs(cursor: &mut Cursor<&[u8]>, num_keys: u64) -> MigrateResult<Vec<u16>> {
+        let mut mp_ptrs = Vec::with_capacity(num_keys as usize);
+        for _ in 0..num_keys {
+            mp_ptrs.push(cursor.read_u16::<LittleEndian>()?);
+        }
+        Ok(mp_ptrs)
+    }
+
+    // The mp_ptrs array fills the space between the page header and
+    // pb_lower, at two bytes per entry — hence the shift by one.
+    fn num_keys(pb_lower: u16, bits: Bits) -> u64 {
+        (u64::from(pb_lower) - page_header_size(bits)) >> 1
+    }
+}
+
+// Reads an LMDB environment's data.mdb file directly (without liblmdb)
+// so its contents can be dumped or migrated across bit depths.
+pub struct Migrator {
+    file: File,
+    bits: Bits, // bit depth of the executable that created the file
+}
+
+impl Migrator {
+    /// Create a new Migrator for the LMDB environment at the given path.
+    /// This tries to open the data.mdb file in the environment and determine
+    /// the bit depth of the executable that created it, so it can fail
+    /// and return an Err if the file can't be opened or the depth determined.
+    pub fn new(path: &Path) -> MigrateResult<Migrator> {
+        let mut path = PathBuf::from(path);
+        path.push("data.mdb");
+        let mut file = File::open(&path)?;
+
+        // The meta page's magic number sits right after the page header,
+        // whose size depends on bit depth — so probe both offsets.
+        file.seek(SeekFrom::Start(page_header_size(Bits::U32)))?;
+        let mut buf = [0; 4];
+        file.read_exact(&mut buf)?;
+
+        let bits = if buf == MAGIC {
+            Bits::U32
+        } else {
+            file.seek(SeekFrom::Start(page_header_size(Bits::U64)))?;
+            file.read_exact(&mut buf)?;
+            if buf == MAGIC {
+                Bits::U64
+            } else {
+                return Err(MigrateError::IndeterminateBitDepth);
+            }
+        };
+
+        Ok(Migrator {
+            file,
+            bits,
+        })
+    }
+
+    /// Dump the data in one of the databases in the LMDB environment.
+    /// If the `database` parameter is None, then we dump the data in the main
+    /// database.  If it's the name of a subdatabase, then we dump the data
+    /// in that subdatabase.
+    ///
+    /// Note that the output isn't identical to that of the mdb_dump utility,
+    /// since mdb_dump includes subdatabase key/value pairs when dumping
+    /// the main database, and those values are architecture-dependent, since
+    /// they contain pointer-sized data.
+    ///
+    /// If we wanted to support identical output, we could parameterize
+    /// inclusion of subdatabase pairs in get_pairs() and include them
+    /// when dumping data, while continuing to exclude them when migrating
+    /// data.
+    pub fn dump<T: Write>(&mut self, database: Option<&str>, mut out: T) -> MigrateResult<()> {
+        let meta_data = self.get_meta_data()?;
+        let root_page_num = meta_data.mm_dbs.main.md_root;
+        let root_page = Rc::new(self.get_page(root_page_num)?);
+
+        // For a subdatabase, find its record in the main database and
+        // restart the traversal from that subdatabase's own root page.
+        let pairs;
+        if let Some(database) = database {
+            let subdbs = self.get_subdbs(root_page)?;
+            let database =
+                subdbs.get(database.as_bytes()).ok_or_else(|| MigrateError::DatabaseNotFound(database.to_string()))?;
+            let root_page_num = database.md_root;
+            let root_page = Rc::new(self.get_page(root_page_num)?);
+            pairs = self.get_pairs(root_page)?;
+        } else {
+            pairs = self.get_pairs(root_page)?;
+        }
+
+        // Emit an mdb_dump-style header.  NOTE(review): maxreaders and
+        // db_pagesize are hard-coded rather than read from the file —
+        // presumably matching the reference dumps; confirm if reused
+        // beyond the tests.
+        out.write_all(b"VERSION=3\n")?;
+        out.write_all(b"format=bytevalue\n")?;
+        if let Some(database) = database {
+            writeln!(out, "database={}", database)?;
+        }
+        out.write_all(b"type=btree\n")?;
+        writeln!(out, "mapsize={}", meta_data.mm_mapsize)?;
+        out.write_all(b"maxreaders=126\n")?;
+        out.write_all(b"db_pagesize=4096\n")?;
+        out.write_all(b"HEADER=END\n")?;
+
+        // Each key and value is written as a space-prefixed line of
+        // lowercase hex bytes.
+        for (key, value) in pairs {
+            out.write_all(b" ")?;
+            for byte in key {
+                write!(out, "{:02x}", byte)?;
+            }
+            out.write_all(b"\n")?;
+            out.write_all(b" ")?;
+            for byte in value {
+                write!(out, "{:02x}", byte)?;
+            }
+            out.write_all(b"\n")?;
+        }
+
+        out.write_all(b"DATA=END\n")?;
+
+        Ok(())
+    }
+
+    /// Migrate all data in all of databases in the existing LMDB environment
+    /// to a new environment.  This includes all key/value pairs in the main
+    /// database that aren't metadata about subdatabases and all key/value pairs
+    /// in all subdatabases.
+    ///
+    /// We also set the map size and maximum databases of the new environment
+    /// to their values for the existing environment.  But we don't set
+    /// other metadata, and we don't check that the new environment is empty
+    /// before migrating data.
+    ///
+    /// Thus it's possible for this to overwrite existing data or fail
+    /// to migrate data if the new environment isn't empty.  It's the consumer's
+    /// responsibility to ensure that data can be safely migrated to the new
+    /// environment.  In general, this means that environment should be empty.
+    pub fn migrate(&mut self, dest: &Path) -> MigrateResult<()> {
+        let meta_data = self.get_meta_data()?;
+        let root_page_num = meta_data.mm_dbs.main.md_root;
+        validate_page_num(root_page_num, self.bits)?;
+        let root_page = Rc::new(self.get_page(root_page_num)?);
+        let subdbs = self.get_subdbs(Rc::clone(&root_page))?;
+
+        let env = Environment::new()
+            .set_map_size(meta_data.mm_mapsize as usize)
+            .set_max_dbs(subdbs.len() as u32)
+            .open(dest)?;
+
+        // Create the databases before we open a read-write transaction,
+        // since database creation requires its own read-write transaction,
+        // which would hang while awaiting completion of an existing one.
+        env.create_db(None, meta_data.mm_dbs.main.md_flags)?;
+        for (subdb_name, subdb_info) in &subdbs {
+            env.create_db(Some(str::from_utf8(&subdb_name)?), subdb_info.md_flags)?;
+        }
+
+        // Now open the read-write transaction that we'll use to migrate
+        // all the data.
+        let mut txn = env.begin_rw_txn()?;
+
+        // Migrate the main database.
+        let pairs = self.get_pairs(root_page)?;
+        let db = env.open_db(None)?;
+        for (key, value) in pairs {
+            // If we knew that the target database was empty, we could
+            // specify WriteFlags::APPEND to speed up the migration.
+            txn.put(db, &key, &value, WriteFlags::empty())?;
+        }
+
+        // Migrate subdatabases.
+        for (subdb_name, subdb_info) in &subdbs {
+            let root_page = Rc::new(self.get_page(subdb_info.md_root)?);
+            let pairs = self.get_pairs(root_page)?;
+            let db = env.open_db(Some(str::from_utf8(&subdb_name)?))?;
+            for (key, value) in pairs {
+                // If we knew that the target database was empty, we could
+                // specify WriteFlags::APPEND to speed up the migration.
+                txn.put(db, &key, &value, WriteFlags::empty())?;
+            }
+        }
+
+        txn.commit()?;
+
+        Ok(())
+    }
+
+    // Walk the b-tree from root_page (iteratively, via an explicit page
+    // stack), collecting every SubData leaf into a name -> Database map.
+    fn get_subdbs(&mut self, root_page: Rc<Page>) -> MigrateResult<HashMap<Vec<u8>, Database>> {
+        let mut subdbs = HashMap::new();
+        let mut pages = vec![root_page];
+
+        while let Some(page) = pages.pop() {
+            match &*page {
+                Page::BRANCH(nodes) => {
+                    for branch in nodes {
+                        pages.push(Rc::new(self.get_page(branch.mp_pgno)?));
+                    }
+                },
+                Page::LEAF(nodes) => {
+                    for leaf in nodes {
+                        if let LeafNode::SubData {
+                            key,
+                            db,
+                            ..
+                        } = leaf
+                        {
+                            subdbs.insert(key.to_vec(), db.clone());
+                        };
+                    }
+                },
+                _ => {
+                    // META pages should never appear below a b-tree root.
+                    return Err(MigrateError::UnexpectedPageVariant);
+                },
+            }
+        }
+
+        Ok(subdbs)
+    }
+
+    // Walk the b-tree from root_page, collecting Regular and BigData
+    // key/value pairs into a BTreeMap (sorted by key, which gives the
+    // dump output a deterministic order).  SubData leaves are skipped.
+    fn get_pairs(&mut self, root_page: Rc<Page>) -> MigrateResult<BTreeMap<Vec<u8>, Vec<u8>>> {
+        let mut pairs = BTreeMap::new();
+        let mut pages = vec![root_page];
+
+        while let Some(page) = pages.pop() {
+            match &*page {
+                Page::BRANCH(nodes) => {
+                    for branch in nodes {
+                        pages.push(Rc::new(self.get_page(branch.mp_pgno)?));
+                    }
+                },
+                Page::LEAF(nodes) => {
+                    for leaf in nodes {
+                        match leaf {
+                            LeafNode::Regular {
+                                key,
+                                value,
+                                ..
+                            } => {
+                                pairs.insert(key.to_vec(), value.to_vec());
+                            },
+                            LeafNode::BigData {
+                                mv_size,
+                                key,
+                                overflow_pgno,
+                                ..
+                            } => {
+                                // XXX perhaps we could reduce memory consumption
+                                // during a migration by waiting to read big data
+                                // until it's time to write it to the new database.
+                                let value = self.read_data(
+                                    *overflow_pgno * u64::from(PAGESIZE) + page_header_size(self.bits),
+                                    *mv_size as usize,
+                                )?;
+                                pairs.insert(key.to_vec(), value);
+                            },
+                            LeafNode::SubData {
+                                ..
+                            } => {
+                                // We don't include subdatabase leaves in pairs,
+                                // since there's no architecture-neutral
+                                // representation of them, and in any case they're
+                                // meta-data that should get recreated when we
+                                // migrate the subdatabases themselves.
+                                //
+                                // If we wanted to create identical dumps to those
+                                // produced by mdb_dump, however, we could allow
+                                // consumers to specify that they'd like to include
+                                // these records.
+                            },
+                        };
+                    }
+                },
+                _ => {
+                    return Err(MigrateError::UnexpectedPageVariant);
+                },
+            }
+        }
+
+        Ok(pairs)
+    }
+
+    // Read `size` bytes from the file starting at `offset`.
+    fn read_data(&mut self, offset: u64, size: usize) -> MigrateResult<Vec<u8>> {
+        self.file.seek(SeekFrom::Start(offset))?;
+        let mut buf: Vec<u8> = vec![0; size];
+        self.file.read_exact(&mut buf[0..size])?;
+        Ok(buf.to_vec())
+    }
+
+    // Read and parse the page at the given page number (pages are
+    // PAGESIZE bytes each).
+    fn get_page(&mut self, page_no: u64) -> MigrateResult<Page> {
+        Page::new(self.read_data(page_no * u64::from(PAGESIZE), usize::from(PAGESIZE))?, self.bits)
+    }
+
+    // LMDB keeps two meta pages (pages 0 and 1) and alternates between
+    // them; the one with the larger transaction id is current.
+    fn get_meta_data(&mut self) -> MigrateResult<MetaData> {
+        let (page0, page1) = (self.get_page(0)?, self.get_page(1)?);
+
+        match (page0, page1) {
+            (Page::META(meta0), Page::META(meta1)) => {
+                let meta = if meta1.mm_txnid > meta0.mm_txnid {
+                    meta1
+                } else {
+                    meta0
+                };
+                // 0xBE_EF_C0_DE is LMDB's meta-page magic number.
+                if meta.mm_magic != 0xBE_EF_C0_DE {
+                    return Err(MigrateError::InvalidMagicNum);
+                }
+                // NOTE(review): 1 is LMDB's released data version; 999
+                // presumably marks a development format — confirm against
+                // the lmdb sources.
+                if meta.mm_version != 1 && meta.mm_version != 999 {
+                    return Err(MigrateError::InvalidDataVersion);
+                }
+                Ok(meta)
+            },
+            _ => Err(MigrateError::UnexpectedPageVariant),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    //! Tests compare dump() output against reference dump files, and
+    //! verify migrate() by dumping the migrated env and comparing that
+    //! against the same references.  The reference envs under tests/envs
+    //! were created by 32- and 64-bit builds respectively.
+    use super::MigrateResult;
+    use super::Migrator;
+    use crate::error::MigrateError;
+    use lmdb::{
+        Environment,
+        Error as LmdbError,
+    };
+    use std::{
+        env,
+        fs::{
+            self,
+            File,
+        },
+        io::{
+            Read,
+            Seek,
+            SeekFrom,
+        },
+        mem::size_of,
+        path::PathBuf,
+    };
+    use tempfile::{
+        tempdir,
+        tempfile,
+    };
+
+    // Assert that two files have byte-identical contents, comparing them
+    // in 1 KiB chunks from the start.
+    // NOTE(review): `panic!(err)` with a non-string payload is deprecated
+    // as of Rust 2021; prefer `panic!("{}", err)`.
+    fn compare_files(ref_file: &mut File, new_file: &mut File) -> MigrateResult<()> {
+        ref_file.seek(SeekFrom::Start(0))?;
+        new_file.seek(SeekFrom::Start(0))?;
+
+        let ref_buf = &mut [0; 1024];
+        let new_buf = &mut [0; 1024];
+
+        loop {
+            match ref_file.read(ref_buf) {
+                Err(err) => panic!(err),
+                Ok(ref_len) => match new_file.read(new_buf) {
+                    Err(err) => panic!(err),
+                    Ok(new_len) => {
+                        assert_eq!(ref_len, new_len);
+                        if ref_len == 0 {
+                            break;
+                        };
+                        assert_eq!(ref_buf[0..ref_len], new_buf[0..new_len]);
+                    },
+                },
+            }
+        }
+
+        Ok(())
+    }
+
+    // Dump the main database of the 32-bit reference env and compare
+    // against the reference dump.
+    #[test]
+    fn test_dump_32() -> MigrateResult<()> {
+        let cwd = env::current_dir()?;
+        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
+        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect();
+
+        // Dump data from the test env to a new dump file.
+        let mut migrator = Migrator::new(&test_env_path)?;
+        let mut new_dump_file = tempfile()?;
+        migrator.dump(None, &new_dump_file)?;
+
+        // Open the reference dump file.
+        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump.txt"].iter().collect();
+        let mut ref_dump_file = File::open(ref_dump_file_path)?;
+
+        // Compare the new dump file to the reference dump file.
+        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
+
+        Ok(())
+    }
+
+    // Dump the "subdb" subdatabase of the 32-bit reference env.
+    #[test]
+    fn test_dump_32_subdb() -> MigrateResult<()> {
+        let cwd = env::current_dir()?;
+        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
+        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect();
+
+        // Dump data from the test env to a new dump file.
+        let mut migrator = Migrator::new(&test_env_path)?;
+        let mut new_dump_file = tempfile()?;
+        migrator.dump(Some("subdb"), &new_dump_file)?;
+
+        // Open the reference dump file.
+        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect();
+        let mut ref_dump_file = File::open(ref_dump_file_path)?;
+
+        // Compare the new dump file to the reference dump file.
+        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
+
+        Ok(())
+    }
+
+    // Dump the main database of the 64-bit reference env.
+    #[test]
+    fn test_dump_64() -> MigrateResult<()> {
+        let cwd = env::current_dir()?;
+        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
+        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect();
+
+        // Dump data from the test env to a new dump file.
+        let mut migrator = Migrator::new(&test_env_path)?;
+        let mut new_dump_file = tempfile()?;
+        migrator.dump(None, &new_dump_file)?;
+
+        // Open the reference dump file.
+        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump.txt"].iter().collect();
+        let mut ref_dump_file = File::open(ref_dump_file_path)?;
+
+        // Compare the new dump file to the reference dump file.
+        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
+
+        Ok(())
+    }
+
+    // Dump the "subdb" subdatabase of the 64-bit reference env.
+    #[test]
+    fn test_dump_64_subdb() -> MigrateResult<()> {
+        let cwd = env::current_dir()?;
+        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
+        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect();
+
+        // Dump data from the test env to a new dump file.
+        let mut migrator = Migrator::new(&test_env_path)?;
+        let mut new_dump_file = tempfile()?;
+        migrator.dump(Some("subdb"), &new_dump_file)?;
+
+        // Open the reference dump file.
+        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect();
+        let mut ref_dump_file = File::open(ref_dump_file_path)?;
+
+        // Compare the new dump file to the reference dump file.
+        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
+
+        Ok(())
+    }
+
+    // Migrate the 64-bit reference env to a new env, then verify the new
+    // env's subdb dump matches the reference dump.
+    #[test]
+    fn test_migrate_64() -> MigrateResult<()> {
+        let cwd = env::current_dir()?;
+        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
+        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_64"].iter().collect();
+
+        // Migrate data from the old env to a new one.
+        let new_env = tempdir()?;
+        let mut migrator = Migrator::new(&test_env_path)?;
+        migrator.migrate(new_env.path())?;
+
+        // Dump data from the new env to a new dump file.
+        let mut migrator = Migrator::new(&new_env.path())?;
+        let mut new_dump_file = tempfile()?;
+        migrator.dump(Some("subdb"), &new_dump_file)?;
+
+        // Open the reference dump file.
+        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect();
+        let mut ref_dump_file = File::open(ref_dump_file_path)?;
+
+        // Compare the new dump file to the reference dump file.
+        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
+
+        Ok(())
+    }
+
+    // Same as test_migrate_64, but starting from the 32-bit reference env.
+    #[test]
+    fn test_migrate_32() -> MigrateResult<()> {
+        let cwd = env::current_dir()?;
+        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
+        let test_env_path: PathBuf = [cwd, "tests", "envs", "ref_env_32"].iter().collect();
+
+        // Migrate data from the old env to a new one.
+        let new_env = tempdir()?;
+        let mut migrator = Migrator::new(&test_env_path)?;
+        migrator.migrate(new_env.path())?;
+
+        // Dump data from the new env to a new dump file.
+        let mut migrator = Migrator::new(&new_env.path())?;
+        let mut new_dump_file = tempfile()?;
+        migrator.dump(Some("subdb"), &new_dump_file)?;
+
+        // Open the reference dump file.
+        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect();
+        let mut ref_dump_file = File::open(ref_dump_file_path)?;
+
+        // Compare the new dump file to the reference dump file.
+        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
+
+        Ok(())
+    }
+
+    // End-to-end: pick the reference env created with the *opposite* bit
+    // depth from this test build, confirm native LMDB can't open it
+    // (asserted below), migrate it, and confirm the migrated files can be
+    // opened in place of the originals.
+    #[test]
+    fn test_migrate_and_replace() -> MigrateResult<()> {
+        let test_env_name = match size_of::<usize>() {
+            4 => "ref_env_64",
+            8 => "ref_env_32",
+            _ => panic!("only 32- and 64-bit depths are supported"),
+        };
+
+        let cwd = env::current_dir()?;
+        let cwd = cwd.to_str().ok_or(MigrateError::StringConversionError)?;
+        let test_env_path: PathBuf = [cwd, "tests", "envs", test_env_name].iter().collect();
+
+        let old_env = tempdir()?;
+        fs::copy(test_env_path.join("data.mdb"), old_env.path().join("data.mdb"))?;
+        fs::copy(test_env_path.join("lock.mdb"), old_env.path().join("lock.mdb"))?;
+
+        // Confirm that it isn't possible to open the old environment with LMDB.
+        assert_eq!(
+            match Environment::new().open(&old_env.path()) {
+                Err(err) => err,
+                _ => panic!("opening the environment should have failed"),
+            },
+            LmdbError::Invalid
+        );
+
+        // Migrate data from the old env to a new one.
+        let new_env = tempdir()?;
+        let mut migrator = Migrator::new(&old_env.path())?;
+        migrator.migrate(new_env.path())?;
+
+        // Dump data from the new env to a new dump file.
+        let mut migrator = Migrator::new(&new_env.path())?;
+        let mut new_dump_file = tempfile()?;
+        migrator.dump(Some("subdb"), &new_dump_file)?;
+
+        // Open the reference dump file.
+        let ref_dump_file_path: PathBuf = [cwd, "tests", "envs", "ref_dump_subdb.txt"].iter().collect();
+        let mut ref_dump_file = File::open(ref_dump_file_path)?;
+
+        // Compare the new dump file to the reference dump file.
+        compare_files(&mut ref_dump_file, &mut new_dump_file)?;
+
+        // Overwrite the old env's files with the new env's files and confirm
+        // that it's now possible to open the old env with LMDB.
+        fs::copy(new_env.path().join("data.mdb"), old_env.path().join("data.mdb"))?;
+        fs::copy(new_env.path().join("lock.mdb"), old_env.path().join("lock.mdb"))?;
+        assert!(Environment::new().open(&old_env.path()).is_ok());
+
+        Ok(())
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/readwrite.rs
@@ -0,0 +1,95 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use lmdb::{
+    Database,
+    RoCursor,
+    RoTransaction,
+    RwTransaction,
+    Transaction,
+    WriteFlags,
+};
+
+use crate::error::StoreError;
+use crate::read_transform;
+use crate::value::Value;
+
+/// Wrapper around an LMDB read-only transaction.
+pub struct Reader<'env>(pub RoTransaction<'env>);
+/// Wrapper around an LMDB read-write transaction.
+pub struct Writer<'env>(pub RwTransaction<'env>);
+
+/// The read operations common to Reader and Writer: keyed lookup and
+/// cursor iteration over a database.
+pub trait Readable {
+    fn get<K: AsRef<[u8]>>(&self, db: Database, k: &K) -> Result<Option<Value>, StoreError>;
+    fn open_ro_cursor(&self, db: Database) -> Result<RoCursor, StoreError>;
+}
+
+impl<'env> Readable for Reader<'env> {
+    // Look up a key and decode the raw LMDB result into Option<Value>
+    // via read_transform (which also maps "not found" to None).
+    fn get<K: AsRef<[u8]>>(&self, db: Database, k: &K) -> Result<Option<Value>, StoreError> {
+        let bytes = self.0.get(db, &k);
+        read_transform(bytes)
+    }
+
+    // Open a read-only cursor on the database within this transaction.
+    fn open_ro_cursor(&self, db: Database) -> Result<RoCursor, StoreError> {
+        self.0.open_ro_cursor(db).map_err(StoreError::LmdbError)
+    }
+}
+
+impl<'env> Reader<'env> {
+    // Crate-internal constructor; consumers obtain Readers from the env.
+    pub(crate) fn new(txn: RoTransaction) -> Reader {
+        Reader(txn)
+    }
+
+    /// Abort the underlying read-only transaction, consuming the Reader.
+    pub fn abort(self) {
+        self.0.abort();
+    }
+}
+
+impl<'env> Readable for Writer<'env> {
+    // Same read path as Reader, but within the read-write transaction so
+    // uncommitted writes are visible.
+    fn get<K: AsRef<[u8]>>(&self, db: Database, k: &K) -> Result<Option<Value>, StoreError> {
+        let bytes = self.0.get(db, &k);
+        read_transform(bytes)
+    }
+
+    fn open_ro_cursor(&self, db: Database) -> Result<RoCursor, StoreError> {
+        self.0.open_ro_cursor(db).map_err(StoreError::LmdbError)
+    }
+}
+
+impl<'env> Writer<'env> {
+    // Crate-internal constructor; consumers obtain Writers from the env.
+    pub(crate) fn new(txn: RwTransaction) -> Writer {
+        Writer(txn)
+    }
+
+    /// Commit the underlying read-write transaction, consuming the Writer.
+    pub fn commit(self) -> Result<(), StoreError> {
+        self.0.commit().map_err(StoreError::LmdbError)
+    }
+
+    /// Abort the underlying read-write transaction, discarding its writes.
+    pub fn abort(self) {
+        self.0.abort();
+    }
+
+    // Serialize the Value and store it under the key.
+    pub(crate) fn put<K: AsRef<[u8]>>(
+        &mut self,
+        db: Database,
+        k: &K,
+        v: &Value,
+        flags: WriteFlags,
+    ) -> Result<(), StoreError> {
+        // TODO: don't allocate twice.
+        self.0.put(db, &k, &v.to_bytes()?, flags).map_err(StoreError::LmdbError)
+    }
+
+    // Delete the key; when `v` is Some, only the matching duplicate value
+    // is deleted (per lmdb's del semantics).
+    pub(crate) fn delete<K: AsRef<[u8]>>(&mut self, db: Database, k: &K, v: Option<&[u8]>) -> Result<(), StoreError> {
+        self.0.del(db, &k, v).map_err(StoreError::LmdbError)
+    }
+
+    // Remove all entries from the database.
+    pub(crate) fn clear(&mut self, db: Database) -> Result<(), StoreError> {
+        self.0.clear_db(db).map_err(StoreError::LmdbError)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/store.rs
@@ -0,0 +1,21 @@
+pub mod integer;
+pub mod integermulti;
+pub mod multi;
+pub mod single;
+
+use lmdb::DatabaseFlags;
+
+/// Per-store configuration used when opening a database.
+#[derive(Default, Debug, Copy, Clone)]
+pub struct Options {
+    // Whether to create the database if it does not already exist.
+    pub create: bool,
+    // Raw LMDB database flags passed through when the database is opened.
+    pub flags: DatabaseFlags,
+}
+
+impl Options {
+    /// Convenience constructor: create-if-missing with no extra flags.
+    pub fn create() -> Options {
+        Options {
+            create: true,
+            flags: DatabaseFlags::empty(),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/store/integer.rs
@@ -0,0 +1,172 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use std::marker::PhantomData;
+
+use bincode::serialize;
+
+use serde::Serialize;
+
+use lmdb::Database;
+
+use crate::error::{
+    DataError,
+    StoreError,
+};
+
+use crate::readwrite::{
+    Readable,
+    Writer,
+};
+
+use crate::value::Value;
+
+use crate::store::single::SingleStore;
+
+/// Types that can serialize themselves into LMDB key bytes.
+pub trait EncodableKey {
+    fn to_bytes(&self) -> Result<Vec<u8>, DataError>;
+}
+
+/// Marker trait for integer types usable as `IntegerStore` keys.
+pub trait PrimitiveInt: EncodableKey {}
+
+// u32 is the only built-in integer key type; other widths require a
+// user-defined newtype (see tests/integer-store.rs).
+impl PrimitiveInt for u32 {}
+
+// Blanket impl: any serde-serializable type can act as a key, encoded via
+// bincode.
+impl<T> EncodableKey for T
+where
+    T: Serialize,
+{
+    fn to_bytes(&self) -> Result<Vec<u8>, DataError> {
+        serialize(self) // TODO: limited key length.
+            .map_err(Into::into)
+    }
+}
+
+// The bincode-encoded form of a key of type `K`.  The phantom parameter
+// retains the source key type for compile-time type safety without storing
+// a `K` value.
+pub(crate) struct Key<K> {
+    bytes: Vec<u8>,
+    phantom: PhantomData<K>,
+}
+
+// Lets an encoded key be passed anywhere a raw byte key is accepted.
+impl<K> AsRef<[u8]> for Key<K>
+where
+    K: EncodableKey,
+{
+    fn as_ref(&self) -> &[u8] {
+        self.bytes.as_ref()
+    }
+}
+
+impl<K> Key<K>
+where
+    K: EncodableKey,
+{
+    // Encodes `k` eagerly; fails if serialization fails.
+    #[allow(clippy::new_ret_no_self)]
+    pub(crate) fn new(k: &K) -> Result<Key<K>, DataError> {
+        Ok(Key {
+            bytes: k.to_bytes()?,
+            phantom: PhantomData,
+        })
+    }
+}
+
+/// A single-value store whose keys are integers of type `K`, implemented as
+/// a thin typed wrapper over `SingleStore` that bincode-encodes each key.
+pub struct IntegerStore<K>
+where
+    K: PrimitiveInt,
+{
+    inner: SingleStore,
+    phantom: PhantomData<K>,
+}
+
+impl<K> IntegerStore<K>
+where
+    K: PrimitiveInt,
+{
+    pub(crate) fn new(db: Database) -> IntegerStore<K> {
+        IntegerStore {
+            inner: SingleStore::new(db),
+            phantom: PhantomData,
+        }
+    }
+
+    /// Looks up the value stored under integer key `k`, if any.
+    pub fn get<'env, T: Readable>(&self, reader: &'env T, k: K) -> Result<Option<Value<'env>>, StoreError> {
+        self.inner.get(reader, Key::new(&k)?)
+    }
+
+    /// Stores `v` under integer key `k`, replacing any existing value.
+    pub fn put(&self, writer: &mut Writer, k: K, v: &Value) -> Result<(), StoreError> {
+        self.inner.put(writer, Key::new(&k)?, v)
+    }
+
+    /// Deletes the value stored under integer key `k`.
+    pub fn delete(&self, writer: &mut Writer, k: K) -> Result<(), StoreError> {
+        self.inner.delete(writer, Key::new(&k)?)
+    }
+
+    /// Removes every entry in the store.
+    pub fn clear(&self, writer: &mut Writer) -> Result<(), StoreError> {
+        self.inner.clear(writer)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::fs;
+    use tempfile::Builder;
+
+    use super::*;
+    use crate::*;
+
+    #[test]
+    fn test_integer_keys() {
+        let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let s = k.open_integer("s", StoreOptions::create()).expect("open");
+
+        // NOTE(review): the `$type` macro parameter is never used in the
+        // macro body; the invocations below pass it only for readability.
+        macro_rules! test_integer_keys {
+            ($type:ty, $key:expr) => {{
+                let mut writer = k.write().expect("writer");
+
+                s.put(&mut writer, $key, &Value::Str("hello!")).expect("write");
+                // A writer sees its own uncommitted write.
+                assert_eq!(s.get(&writer, $key).expect("read"), Some(Value::Str("hello!")));
+                writer.commit().expect("committed");
+
+                // After commit, a fresh reader sees the value too.
+                let reader = k.read().expect("reader");
+                assert_eq!(s.get(&reader, $key).expect("read"), Some(Value::Str("hello!")));
+            }};
+        }
+
+        test_integer_keys!(u32, std::u32::MIN);
+        test_integer_keys!(u32, std::u32::MAX);
+    }
+
+    #[test]
+    fn test_clear() {
+        let root = Builder::new().prefix("test_integer_clear").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let s = k.open_integer("s", StoreOptions::create()).expect("open");
+
+        {
+            let mut writer = k.write().expect("writer");
+            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
+            s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
+            s.put(&mut writer, 3, &Value::Str("hello!")).expect("write");
+            writer.commit().expect("committed");
+        }
+
+        {
+            // Clearing the store removes all three committed entries.
+            let mut writer = k.write().expect("writer");
+            s.clear(&mut writer).expect("cleared");
+            writer.commit().expect("committed");
+
+            let reader = k.read().expect("reader");
+            assert_eq!(s.get(&reader, 1).expect("read"), None);
+            assert_eq!(s.get(&reader, 2).expect("read"), None);
+            assert_eq!(s.get(&reader, 3).expect("read"), None);
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/store/integermulti.rs
@@ -0,0 +1,144 @@
+// Copyright 2018 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use lmdb::{
+    Database,
+    WriteFlags,
+};
+
+use std::marker::PhantomData;
+
+use crate::error::StoreError;
+
+use crate::readwrite::{
+    Readable,
+    Writer,
+};
+
+use crate::value::Value;
+
+use crate::store::multi::{
+    Iter,
+    MultiStore,
+};
+
+use crate::store::integer::{
+    Key,
+    PrimitiveInt,
+};
+
+/// A multi-value store whose keys are integers of type `K`, implemented as
+/// a thin typed wrapper over `MultiStore` that bincode-encodes each key.
+pub struct MultiIntegerStore<K>
+where
+    K: PrimitiveInt,
+{
+    inner: MultiStore,
+    phantom: PhantomData<K>,
+}
+
+impl<K> MultiIntegerStore<K>
+where
+    K: PrimitiveInt,
+{
+    pub(crate) fn new(db: Database) -> MultiIntegerStore<K> {
+        MultiIntegerStore {
+            inner: MultiStore::new(db),
+            phantom: PhantomData,
+        }
+    }
+
+    /// Returns an iterator over all values stored under integer key `k`.
+    pub fn get<'env, T: Readable>(&self, reader: &'env T, k: K) -> Result<Iter<'env>, StoreError> {
+        self.inner.get(reader, Key::new(&k)?)
+    }
+
+    /// Returns only the first value stored under integer key `k`, if any.
+    pub fn get_first<'env, T: Readable>(&self, reader: &'env T, k: K) -> Result<Option<Value<'env>>, StoreError> {
+        self.inner.get_first(reader, Key::new(&k)?)
+    }
+
+    /// Adds `v` under integer key `k`; duplicates are allowed.
+    pub fn put(&self, writer: &mut Writer, k: K, v: &Value) -> Result<(), StoreError> {
+        self.inner.put(writer, Key::new(&k)?, v)
+    }
+
+    /// Like `put`, but with explicit LMDB write flags (e.g. to reject
+    /// duplicate entries).
+    pub fn put_with_flags(&self, writer: &mut Writer, k: K, v: &Value, flags: WriteFlags) -> Result<(), StoreError> {
+        self.inner.put_with_flags(writer, Key::new(&k)?, v, flags)
+    }
+
+    /// Deletes every value stored under integer key `k`.
+    pub fn delete_all(&self, writer: &mut Writer, k: K) -> Result<(), StoreError> {
+        self.inner.delete_all(writer, Key::new(&k)?)
+    }
+
+    /// Deletes only the value `v` stored under integer key `k`.
+    pub fn delete(&self, writer: &mut Writer, k: K, v: &Value) -> Result<(), StoreError> {
+        self.inner.delete(writer, Key::new(&k)?, v)
+    }
+
+    /// Removes every entry in the store.
+    pub fn clear(&self, writer: &mut Writer) -> Result<(), StoreError> {
+        self.inner.clear(writer)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    extern crate tempfile;
+
+    use self::tempfile::Builder;
+    use std::fs;
+
+    use super::*;
+    use crate::*;
+
+    #[test]
+    fn test_integer_keys() {
+        let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
+
+        // NOTE(review): the `$type` macro parameter is never used in the
+        // macro body; the invocations below pass it only for readability.
+        macro_rules! test_integer_keys {
+            ($type:ty, $key:expr) => {{
+                let mut writer = k.write().expect("writer");
+
+                s.put(&mut writer, $key, &Value::Str("hello!")).expect("write");
+                assert_eq!(s.get_first(&writer, $key).expect("read"), Some(Value::Str("hello!")));
+                writer.commit().expect("committed");
+
+                let reader = k.read().expect("reader");
+                assert_eq!(s.get_first(&reader, $key).expect("read"), Some(Value::Str("hello!")));
+            }};
+        }
+
+        test_integer_keys!(u32, std::u32::MIN);
+        test_integer_keys!(u32, std::u32::MAX);
+    }
+
+    #[test]
+    fn test_clear() {
+        let root = Builder::new().prefix("test_multi_integer_clear").tempdir().expect("tempdir");
+        fs::create_dir_all(root.path()).expect("dir created");
+        let k = Rkv::new(root.path()).expect("new succeeded");
+        let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
+
+        {
+            // Key 1 deliberately holds two duplicate entries.
+            let mut writer = k.write().expect("writer");
+            s.put(&mut writer, 1, &Value::Str("hello!")).expect("write");
+            s.put(&mut writer, 1, &Value::Str("hello1!")).expect("write");
+            s.put(&mut writer, 2, &Value::Str("hello!")).expect("write");
+            writer.commit().expect("committed");
+        }
+
+        {
+            // Clearing removes all entries, including duplicates.
+            let mut writer = k.write().expect("writer");
+            s.clear(&mut writer).expect("cleared");
+            writer.commit().expect("committed");
+
+            let reader = k.read().expect("reader");
+            assert_eq!(s.get_first(&reader, 1).expect("read"), None);
+            assert_eq!(s.get_first(&reader, 2).expect("read"), None);
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/store/multi.rs
@@ -0,0 +1,151 @@
+// Copyright 2018 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use crate::{
+    error::StoreError,
+    read_transform,
+    readwrite::{
+        Readable,
+        Writer,
+    },
+    value::Value,
+};
+use lmdb::{
+    Cursor,
+    Database,
+    Iter as LmdbIter,
+    //    IterDup as LmdbIterDup,
+    RoCursor,
+    WriteFlags,
+};
+
+// A handle to a multi-valued database — presumably opened with DUP_SORT by
+// env.rs (TODO confirm).  Copy/Clone is cheap: this is just a database id.
+#[derive(Copy, Clone)]
+pub struct MultiStore {
+    db: Database,
+}
+
+// Iterator over entries; owns the cursor so the borrowed LMDB iterator it
+// wraps stays valid for the iterator's lifetime.
+pub struct Iter<'env> {
+    iter: LmdbIter<'env>,
+    cursor: RoCursor<'env>,
+}
+
+impl MultiStore {
+    pub(crate) fn new(db: Database) -> MultiStore {
+        MultiStore {
+            db,
+        }
+    }
+
+    /// Provides a cursor to all of the values for the duplicate entries that match this key
+    pub fn get<T: Readable, K: AsRef<[u8]>>(self, reader: &T, k: K) -> Result<Iter, StoreError> {
+        let mut cursor = reader.open_ro_cursor(self.db)?;
+        let iter = cursor.iter_dup_of(k);
+        Ok(Iter {
+            iter,
+            cursor,
+        })
+    }
+
+    /// Provides the first value that matches this key
+    pub fn get_first<T: Readable, K: AsRef<[u8]>>(self, reader: &T, k: K) -> Result<Option<Value>, StoreError> {
+        reader.get(self.db, &k)
+    }
+
+    /// Insert a value at the specified key.
+    /// This put will allow duplicate entries.  If you wish to have duplicate entries
+    /// rejected, use the `put_with_flags` function and specify NO_DUP_DATA
+    pub fn put<K: AsRef<[u8]>>(self, writer: &mut Writer, k: K, v: &Value) -> Result<(), StoreError> {
+        writer.put(self.db, &k, v, WriteFlags::empty())
+    }
+
+    /// Like `put`, but forwards the caller's LMDB write flags verbatim.
+    pub fn put_with_flags<K: AsRef<[u8]>>(
+        self,
+        writer: &mut Writer,
+        k: K,
+        v: &Value,
+        flags: WriteFlags,
+    ) -> Result<(), StoreError> {
+        writer.put(self.db, &k, v, flags)
+    }
+
+    /// Deletes every value stored under `k` (passes `None` to lmdb's `del`).
+    pub fn delete_all<K: AsRef<[u8]>>(self, writer: &mut Writer, k: K) -> Result<(), StoreError> {
+        writer.delete(self.db, &k, None)
+    }
+
+    /// Deletes only the duplicate entry under `k` whose serialized form
+    /// matches `v`.
+    pub fn delete<K: AsRef<[u8]>>(self, writer: &mut Writer, k: K, v: &Value) -> Result<(), StoreError> {
+        writer.delete(self.db, &k, Some(&v.to_bytes()?))
+    }
+
+    /* TODO - Figure out how to solve the need to have the cursor stick around when
+     *        we are producing iterators from MultiIter
+    /// Provides an iterator starting at the lexographically smallest value in the store
+    pub fn iter_start(&self, store: MultiStore) -> Result<MultiIter, StoreError> {
+        let mut cursor = self.tx.open_ro_cursor(store.0).map_err(StoreError::LmdbError)?;
+
+        // We call Cursor.iter() instead of Cursor.iter_start() because
+        // the latter panics at "called `Result::unwrap()` on an `Err` value:
+        // NotFound" when there are no items in the store, whereas the former
+        // returns an iterator that yields no items.
+        //
+        // And since we create the Cursor and don't change its position, we can
+        // be sure that a call to Cursor.iter() will start at the beginning.
+        //
+        let iter = cursor.iter_dup();
+
+        Ok(MultiIter {
+            iter,
+            cursor,
+        })
+    }
+    */
+
+    /// Removes every entry from the store.
+    pub fn clear(self, writer: &mut Writer) -> Result<(), StoreError> {
+        writer.clear(self.db)
+    }
+}
+
+/*
+fn read_transform_owned(val: Result<&[u8], lmdb::Error>) -> Result<Option<OwnedValue>, StoreError> {
+    match val {
+        Ok(bytes) => Value::from_tagged_slice(bytes).map(|v| Some(OwnedValue::from(&v))).map_err(StoreError::DataError),
+        Err(lmdb::Error::NotFound) => Ok(None),
+        Err(e) => Err(StoreError::LmdbError(e)),
+    }
+}
+
+impl<'env> Iterator for MultiIter<'env> {
+    type Item = Iter<'env>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.iter.next() {
+            None => None,
+            Some(iter) => Some(Iter {
+                iter,
+                cursor,
+            }),
+        }
+    }
+}
+*/
+
+impl<'env> Iterator for Iter<'env> {
+    type Item = Result<(&'env [u8], Option<Value<'env>>), StoreError>;
+
+    /// Yields `(key, value)` pairs, decoding each raw LMDB entry on the fly;
+    /// decode and LMDB failures surface as `Err` items rather than ending
+    /// iteration.
+    fn next(&mut self) -> Option<Self::Item> {
+        let entry = self.iter.next()?;
+        Some(match entry {
+            Ok((key, bytes)) => read_transform(Ok(bytes)).map(|val| (key, val)),
+            Err(err) => Err(StoreError::LmdbError(err)),
+        })
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/store/single.rs
@@ -0,0 +1,104 @@
+// Copyright 2018 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use crate::{
+    error::StoreError,
+    read_transform,
+    readwrite::{
+        Readable,
+        Writer,
+    },
+    value::Value,
+};
+use lmdb::{
+    Cursor,
+    Database,
+    Iter as LmdbIter,
+    RoCursor,
+    WriteFlags,
+};
+
+// A handle to a database holding at most one value per key.  Copy/Clone is
+// cheap: this is just a database id.
+#[derive(Copy, Clone)]
+pub struct SingleStore {
+    db: Database,
+}
+
+// Iterator over entries; owns the cursor so the borrowed LMDB iterator it
+// wraps stays valid for the iterator's lifetime.
+pub struct Iter<'env> {
+    iter: LmdbIter<'env>,
+    cursor: RoCursor<'env>,
+}
+
+impl SingleStore {
+    pub(crate) fn new(db: Database) -> SingleStore {
+        SingleStore {
+            db,
+        }
+    }
+
+    /// Looks up the value stored under `k`, if any.
+    pub fn get<T: Readable, K: AsRef<[u8]>>(self, reader: &T, k: K) -> Result<Option<Value>, StoreError> {
+        reader.get(self.db, &k)
+    }
+
+    // TODO: flags
+    /// Stores `v` under `k`, replacing any existing value.
+    pub fn put<K: AsRef<[u8]>>(self, writer: &mut Writer, k: K, v: &Value) -> Result<(), StoreError> {
+        writer.put(self.db, &k, v, WriteFlags::empty())
+    }
+
+    /// Deletes the value stored under `k`.
+    pub fn delete<K: AsRef<[u8]>>(self, writer: &mut Writer, k: K) -> Result<(), StoreError> {
+        writer.delete(self.db, &k, None)
+    }
+
+    /// Returns an iterator over the whole store, starting at the first key.
+    pub fn iter_start<T: Readable>(self, reader: &T) -> Result<Iter, StoreError> {
+        let mut cursor = reader.open_ro_cursor(self.db)?;
+
+        // We call Cursor.iter() instead of Cursor.iter_start() because
+        // the latter panics at "called `Result::unwrap()` on an `Err` value:
+        // NotFound" when there are no items in the store, whereas the former
+        // returns an iterator that yields no items.
+        //
+        // And since we create the Cursor and don't change its position, we can
+        // be sure that a call to Cursor.iter() will start at the beginning.
+        //
+        let iter = cursor.iter();
+
+        Ok(Iter {
+            iter,
+            cursor,
+        })
+    }
+
+    /// Returns an iterator positioned at `k` (or the first key after it).
+    pub fn iter_from<T: Readable, K: AsRef<[u8]>>(self, reader: &T, k: K) -> Result<Iter, StoreError> {
+        let mut cursor = reader.open_ro_cursor(self.db)?;
+        let iter = cursor.iter_from(k);
+        Ok(Iter {
+            iter,
+            cursor,
+        })
+    }
+
+    /// Removes every entry from the store.
+    pub fn clear(self, writer: &mut Writer) -> Result<(), StoreError> {
+        writer.clear(self.db)
+    }
+}
+
+impl<'env> Iterator for Iter<'env> {
+    type Item = Result<(&'env [u8], Option<Value<'env>>), StoreError>;
+
+    /// Yields `(key, value)` pairs, decoding each raw LMDB entry on the fly;
+    /// decode and LMDB failures surface as `Err` items rather than ending
+    /// iteration.
+    fn next(&mut self) -> Option<Self::Item> {
+        let entry = self.iter.next()?;
+        Some(match entry {
+            Ok((key, bytes)) => read_transform(Ok(bytes)).map(|val| (key, val)),
+            Err(err) => Err(StoreError::LmdbError(err)),
+        })
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/src/value.rs
@@ -0,0 +1,265 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use arrayref::array_ref;
+use bincode::{
+    deserialize,
+    serialize,
+    serialized_size,
+};
+use ordered_float::OrderedFloat;
+
+use uuid::{
+    Bytes,
+    Uuid,
+};
+
+use crate::error::DataError;
+
+/// We define a set of types, associated with simple integers, to annotate values
+/// stored in LMDB. This is to avoid an accidental 'cast' from a value of one type
+/// to another. For this reason we don't simply use `deserialize` from the `bincode`
+/// crate.
+// NOTE: these discriminants are persisted on disk as the first byte of every
+// stored value, so existing values must never be renumbered — only appended.
+#[repr(u8)]
+#[derive(Debug, PartialEq, Eq)]
+pub enum Type {
+    Bool = 1,
+    U64 = 2,
+    I64 = 3,
+    F64 = 4,
+    Instant = 5, // Millisecond-precision timestamp.
+    Uuid = 6,
+    Str = 7,
+    Json = 8,
+    Blob = 9,
+}
+
+/// We use manual tagging, because <https://github.com/serde-rs/serde/issues/610>.
+impl Type {
+    /// Parses a tag byte back into a `Type`, failing with
+    /// `DataError::UnknownType` for unrecognized tags.
+    pub fn from_tag(tag: u8) -> Result<Type, DataError> {
+        // `ok_or` instead of `ok_or_else`: constructing the error variant is
+        // trivially cheap, so the lazy closure was unnecessary
+        // (clippy::unnecessary_lazy_evaluations).
+        Type::from_primitive(tag).ok_or(DataError::UnknownType(tag))
+    }
+
+    /// Returns the on-disk tag byte for this type.
+    #[allow(clippy::wrong_self_convention)]
+    pub fn to_tag(self) -> u8 {
+        self as u8
+    }
+
+    // Inverse of `to_tag`; must stay in sync with the enum discriminants.
+    fn from_primitive(p: u8) -> Option<Type> {
+        match p {
+            1 => Some(Type::Bool),
+            2 => Some(Type::U64),
+            3 => Some(Type::I64),
+            4 => Some(Type::F64),
+            5 => Some(Type::Instant),
+            6 => Some(Type::Uuid),
+            7 => Some(Type::Str),
+            8 => Some(Type::Json),
+            9 => Some(Type::Blob),
+            _ => None,
+        }
+    }
+}
+
+// Human-readable type names, used in error messages and diagnostics.
+impl ::std::fmt::Display for Type {
+    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+        f.write_str(match *self {
+            Type::Bool => "bool",
+            Type::U64 => "u64",
+            Type::I64 => "i64",
+            Type::F64 => "f64",
+            Type::Instant => "instant",
+            Type::Uuid => "uuid",
+            Type::Str => "str",
+            Type::Json => "json",
+            Type::Blob => "blob",
+        })
+    }
+}
+
+/// A borrowed, typed view of a value stored in LMDB.  Variant payloads
+/// borrow from the transaction's memory-mapped data, so a `Value` cannot
+/// outlive its reader; see `OwnedValue` for the owning counterpart.
+// F64 uses OrderedFloat so that Value can derive Eq.
+#[derive(Debug, Eq, PartialEq)]
+pub enum Value<'s> {
+    Bool(bool),
+    U64(u64),
+    I64(i64),
+    F64(OrderedFloat<f64>),
+    Instant(i64), // Millisecond-precision timestamp.
+    Uuid(&'s Bytes),
+    Str(&'s str),
+    Json(&'s str),
+    Blob(&'s [u8]),
+}
+
+/// The owning counterpart of `Value`: each variant holds its own data, so it
+/// can outlive the transaction it was read in.  Convert with the `From`
+/// impls below.
+#[derive(Clone, Debug, PartialEq)]
+pub enum OwnedValue {
+    Bool(bool),
+    U64(u64),
+    I64(i64),
+    F64(f64),
+    Instant(i64), // Millisecond-precision timestamp.
+    Uuid(Uuid),
+    Str(String),
+    Json(String), // TODO
+    Blob(Vec<u8>),
+}
+
+/// Interprets `bytes` as a 16-byte UUID, rejecting any other length.
+fn uuid(bytes: &[u8]) -> Result<Value, DataError> {
+    match bytes.len() {
+        16 => Ok(Value::Uuid(array_ref![bytes, 0, 16])),
+        _ => Err(DataError::InvalidUuid),
+    }
+}
+
+impl<'s> Value<'s> {
+    /// Decodes a tagged slice like `from_tagged_slice`, but additionally
+    /// enforces that the decoded type matches `expected`.
+    ///
+    /// BUG FIX: the original check was inverted (`if t == expected` returned
+    /// `UnexpectedType`), which rejected every correctly-typed value and
+    /// produced errors whose `expected` and `actual` fields were equal.
+    fn expected_from_tagged_slice(expected: Type, slice: &'s [u8]) -> Result<Value<'s>, DataError> {
+        let (tag, data) = slice.split_first().ok_or(DataError::Empty)?;
+        let t = Type::from_tag(*tag)?;
+        if t != expected {
+            return Err(DataError::UnexpectedType {
+                expected,
+                actual: t,
+            });
+        }
+        Value::from_type_and_data(t, data)
+    }
+
+    /// Decodes a slice whose first byte is a `Type` tag and whose remainder
+    /// is the bincode-encoded payload.
+    pub fn from_tagged_slice(slice: &'s [u8]) -> Result<Value<'s>, DataError> {
+        let (tag, data) = slice.split_first().ok_or(DataError::Empty)?;
+        let t = Type::from_tag(*tag)?;
+        Value::from_type_and_data(t, data)
+    }
+
+    // Deserializes `data` according to `t`.  Uuid is handled up front because
+    // its decode goes through a borrowed byte slice plus a length check,
+    // needing a different error transform than the single-stage variants.
+    fn from_type_and_data(t: Type, data: &'s [u8]) -> Result<Value<'s>, DataError> {
+        if t == Type::Uuid {
+            return deserialize(data)
+                .map_err(|e| DataError::DecodingError {
+                    value_type: t,
+                    err: e,
+                })
+                .map(uuid)?;
+        }
+
+        match t {
+            Type::Bool => deserialize(data).map(Value::Bool),
+            Type::U64 => deserialize(data).map(Value::U64),
+            Type::I64 => deserialize(data).map(Value::I64),
+            Type::F64 => deserialize(data).map(OrderedFloat).map(Value::F64),
+            Type::Instant => deserialize(data).map(Value::Instant),
+            Type::Str => deserialize(data).map(Value::Str),
+            Type::Json => deserialize(data).map(Value::Json),
+            Type::Blob => deserialize(data).map(Value::Blob),
+            Type::Uuid => {
+                // Processed above to avoid verbose duplication of error transforms.
+                unreachable!()
+            },
+        }
+        .map_err(|e| DataError::DecodingError {
+            value_type: t,
+            err: e,
+        })
+    }
+
+    /// Serializes this value to its on-disk form: a `(tag, payload)` tuple
+    /// encoded with bincode.
+    pub fn to_bytes(&self) -> Result<Vec<u8>, DataError> {
+        match self {
+            Value::Bool(v) => serialize(&(Type::Bool.to_tag(), *v)),
+            Value::U64(v) => serialize(&(Type::U64.to_tag(), *v)),
+            Value::I64(v) => serialize(&(Type::I64.to_tag(), *v)),
+            Value::F64(v) => serialize(&(Type::F64.to_tag(), v.0)),
+            Value::Instant(v) => serialize(&(Type::Instant.to_tag(), *v)),
+            Value::Str(v) => serialize(&(Type::Str.to_tag(), v)),
+            Value::Json(v) => serialize(&(Type::Json.to_tag(), v)),
+            Value::Blob(v) => serialize(&(Type::Blob.to_tag(), v)),
+            Value::Uuid(v) => serialize(&(Type::Uuid.to_tag(), v)),
+        }
+        .map_err(DataError::EncodingError)
+    }
+
+    /// Computes the size of `to_bytes()`'s output without allocating it.
+    pub fn serialized_size(&self) -> Result<u64, DataError> {
+        match self {
+            Value::Bool(v) => serialized_size(&(Type::Bool.to_tag(), *v)),
+            Value::U64(v) => serialized_size(&(Type::U64.to_tag(), *v)),
+            Value::I64(v) => serialized_size(&(Type::I64.to_tag(), *v)),
+            Value::F64(v) => serialized_size(&(Type::F64.to_tag(), v.0)),
+            Value::Instant(v) => serialized_size(&(Type::Instant.to_tag(), *v)),
+            Value::Str(v) => serialized_size(&(Type::Str.to_tag(), v)),
+            Value::Json(v) => serialized_size(&(Type::Json.to_tag(), v)),
+            Value::Blob(v) => serialized_size(&(Type::Blob.to_tag(), v)),
+            Value::Uuid(v) => serialized_size(&(Type::Uuid.to_tag(), v)),
+        }
+        .map_err(DataError::EncodingError)
+    }
+}
+
+// Deep-copies a borrowed `Value` into an `OwnedValue` so it can outlive the
+// read transaction it came from.
+impl<'s> From<&'s Value<'s>> for OwnedValue {
+    fn from(value: &Value) -> OwnedValue {
+        match value {
+            Value::Bool(v) => OwnedValue::Bool(*v),
+            Value::U64(v) => OwnedValue::U64(*v),
+            Value::I64(v) => OwnedValue::I64(*v),
+            Value::F64(v) => OwnedValue::F64(**v),
+            Value::Instant(v) => OwnedValue::Instant(*v),
+            Value::Uuid(v) => OwnedValue::Uuid(Uuid::from_bytes(**v)),
+            Value::Str(v) => OwnedValue::Str(v.to_string()),
+            Value::Json(v) => OwnedValue::Json(v.to_string()),
+            Value::Blob(v) => OwnedValue::Blob(v.to_vec()),
+        }
+    }
+}
+
+// Borrows an `OwnedValue` as a `Value` (no allocation; the `Value` borrows
+// the owned data for `'s`).
+impl<'s> From<&'s OwnedValue> for Value<'s> {
+    fn from(value: &OwnedValue) -> Value {
+        match value {
+            OwnedValue::Bool(v) => Value::Bool(*v),
+            OwnedValue::U64(v) => Value::U64(*v),
+            OwnedValue::I64(v) => Value::I64(*v),
+            OwnedValue::F64(v) => Value::F64(OrderedFloat::from(*v)),
+            OwnedValue::Instant(v) => Value::Instant(*v),
+            OwnedValue::Uuid(v) => Value::Uuid(v.as_bytes()),
+            OwnedValue::Str(v) => Value::Str(v),
+            OwnedValue::Json(v) => Value::Json(v),
+            OwnedValue::Blob(v) => Value::Blob(v),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use ordered_float::OrderedFloat;
+
+    #[test]
+    fn test_value_serialized_size() {
+        // | Value enum    | tag: 1 byte   |     value_payload        |
+        // |----------------------------------------------------------|
+        // |   I64         |     1         |       8                  |
+        // |   U64         |     1         |       8                  |
+        // |   Bool        |     1         |       1                  |
+        // |   Instant     |     1         |       8                  |
+        // |   F64         |     1         |       8                  |
+        // |   Uuid        |     1         |       16                 |
+        // | Str/Blob/Json |     1         |(8: len + sizeof(payload))|
+        assert_eq!(Value::I64(-1000).serialized_size().unwrap(), 9);
+        assert_eq!(Value::U64(1000u64).serialized_size().unwrap(), 9);
+        assert_eq!(Value::Bool(true).serialized_size().unwrap(), 2);
+        assert_eq!(Value::Instant(1_558_020_865_224).serialized_size().unwrap(), 9);
+        assert_eq!(Value::F64(OrderedFloat(10000.1)).serialized_size().unwrap(), 9);
+        assert_eq!(Value::Str("hello!").serialized_size().unwrap(), 15);
+        // String length is its UTF-8 *byte* length: "¡" is 2 bytes, so
+        // "¡Hola" is 6 bytes — the same total size as "hello!".
+        assert_eq!(Value::Str("¡Hola").serialized_size().unwrap(), 15);
+        assert_eq!(Value::Blob(b"hello!").serialized_size().unwrap(), 15);
+        assert_eq!(
+            uuid(b"\x9f\xe2\xc4\xe9\x3f\x65\x4f\xdb\xb2\x4c\x02\xb1\x52\x59\x71\x6c")
+                .unwrap()
+                .serialized_size()
+                .unwrap(),
+            17
+        );
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/tests/integer-store.rs
@@ -0,0 +1,77 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use rkv::{
+    PrimitiveInt,
+    Rkv,
+    StoreOptions,
+    Value,
+};
+use serde_derive::Serialize;
+use std::fs;
+use tempfile::Builder;
+
+#[test]
+// Round-trips values through an IntegerStore for several integer key types.
+// Note that every open_integer("s", ...) below aliases the *same* underlying
+// database, deliberately (see the DANGER comment).
+fn test_integer_keys() {
+    let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
+    fs::create_dir_all(root.path()).expect("dir created");
+    let k = Rkv::new(root.path()).expect("new succeeded");
+    let s = k.open_integer("s", StoreOptions::create()).expect("open");
+
+    macro_rules! test_integer_keys {
+        ($store:expr, $key:expr) => {{
+            let mut writer = k.write().expect("writer");
+
+            $store.put(&mut writer, $key, &Value::Str("hello!")).expect("write");
+            assert_eq!($store.get(&writer, $key).expect("read"), Some(Value::Str("hello!")));
+            writer.commit().expect("committed");
+
+            let reader = k.read().expect("reader");
+            assert_eq!($store.get(&reader, $key).expect("read"), Some(Value::Str("hello!")));
+        }};
+    }
+
+    // The integer module provides only the u32 integer key variant
+    // of IntegerStore, so we can use it without further ado.
+    test_integer_keys!(s, std::u32::MIN);
+    test_integer_keys!(s, std::u32::MAX);
+
+    // If you want to use another integer key variant, you need to implement
+    // a newtype, implement PrimitiveInt, and implement or derive Serialize
+    // for it.  Here we do so for the i32 type.
+
+    // DANGER!  Doing this enables you to open a store with multiple,
+    // different integer key types, which may result in unexpected behavior.
+    // Make sure you know what you're doing!
+
+    let t = k.open_integer("s", StoreOptions::create()).expect("open");
+
+    #[derive(Serialize)]
+    struct I32(i32);
+    impl PrimitiveInt for I32 {}
+    test_integer_keys!(t, I32(std::i32::MIN));
+    test_integer_keys!(t, I32(std::i32::MAX));
+
+    let u = k.open_integer("s", StoreOptions::create()).expect("open");
+
+    #[derive(Serialize)]
+    struct U16(u16);
+    impl PrimitiveInt for U16 {}
+    test_integer_keys!(u, U16(std::u16::MIN));
+    test_integer_keys!(u, U16(std::u16::MAX));
+
+    let v = k.open_integer("s", StoreOptions::create()).expect("open");
+
+    #[derive(Serialize)]
+    struct U64(u64);
+    impl PrimitiveInt for U64 {}
+    test_integer_keys!(v, U64(std::u64::MIN));
+    test_integer_keys!(v, U64(std::u64::MAX));
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/tests/manager.rs
@@ -0,0 +1,34 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use rkv::{
+    Manager,
+    Rkv,
+};
+use std::{
+    fs,
+    sync::Arc,
+};
+use tempfile::Builder;
+
+#[test]
+// Identical to the same-named unit test, but this one confirms that it works
+// via the public MANAGER singleton.
+fn test_same() {
+    let root = Builder::new().prefix("test_same_singleton").tempdir().expect("tempdir");
+    fs::create_dir_all(root.path()).expect("dir created");
+
+    let p = root.path();
+    assert!(Manager::singleton().read().unwrap().get(p).expect("success").is_none());
+
+    let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new).expect("created");
+    let fetched_arc = Manager::singleton().read().unwrap().get(p).expect("success").expect("existed");
+    assert!(Arc::ptr_eq(&created_arc, &fetched_arc));
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/tests/multi-integer-store.rs
@@ -0,0 +1,91 @@
+// Copyright 2018 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use rkv::{
+    PrimitiveInt,
+    Rkv,
+    StoreOptions,
+    Value,
+};
+use serde_derive::Serialize;
+use std::fs;
+use tempfile::Builder;
+
+#[test]
+fn test_multi_integer_keys() {
+    let root = Builder::new().prefix("test_integer_keys").tempdir().expect("tempdir");
+    fs::create_dir_all(root.path()).expect("dir created");
+    let k = Rkv::new(root.path()).expect("new succeeded");
+    let s = k.open_multi_integer("s", StoreOptions::create()).expect("open");
+
+    macro_rules! test_integer_keys {
+        ($store:expr, $key:expr) => {{
+            let mut writer = k.write().expect("writer");
+
+            $store.put(&mut writer, $key, &Value::Str("hello1")).expect("write");
+            $store.put(&mut writer, $key, &Value::Str("hello2")).expect("write");
+            $store.put(&mut writer, $key, &Value::Str("hello3")).expect("write");
+            let vals = $store
+                .get(&writer, $key)
+                .expect("read")
+                .map(|result| result.expect("ok"))
+                .map(|(_, v)| v.expect("multi read"))
+                .collect::<Vec<Value>>();
+            assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]);
+            writer.commit().expect("committed");
+
+            let reader = k.read().expect("reader");
+            let vals = $store
+                .get(&reader, $key)
+                .expect("read")
+                .map(|result| result.expect("ok"))
+                .map(|(_, v)| v.expect("multi read"))
+                .collect::<Vec<Value>>();
+            assert_eq!(vals, vec![Value::Str("hello1"), Value::Str("hello2"), Value::Str("hello3")]);
+        }};
+    }
+
+    // The integer module provides only the u32 integer key variant
+    // of IntegerStore, so we can use it without further ado.
+    test_integer_keys!(s, std::u32::MIN);
+    test_integer_keys!(s, std::u32::MAX);
+
+    // If you want to use another integer key variant, you need to implement
+    // a newtype, implement PrimitiveInt, and implement or derive Serialize
+    // for it.  Here we do so for the i32 type.
+
+    // DANGER!  Doing this enables you to open a store with multiple,
+    // different integer key types, which may result in unexpected behavior.
+    // Make sure you know what you're doing!
+
+    let t = k.open_multi_integer("s", StoreOptions::create()).expect("open");
+
+    #[derive(Serialize)]
+    struct I32(i32);
+    impl PrimitiveInt for I32 {}
+    test_integer_keys!(t, I32(std::i32::MIN));
+    test_integer_keys!(t, I32(std::i32::MAX));
+
+    let u = k.open_multi_integer("s", StoreOptions::create()).expect("open");
+
+    #[derive(Serialize)]
+    struct U16(u16);
+    impl PrimitiveInt for U16 {}
+    test_integer_keys!(u, U16(std::u16::MIN));
+    test_integer_keys!(u, U16(std::u16::MAX));
+
+    let v = k.open_multi_integer("s", StoreOptions::create()).expect("open");
+
+    #[derive(Serialize)]
+    struct U64(u64);
+    impl PrimitiveInt for U64 {}
+    test_integer_keys!(v, U64(std::u64::MIN));
+    test_integer_keys!(v, U64(std::u64::MAX));
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv-0.10.2/tests/test_txn.rs
@@ -0,0 +1,105 @@
+/// consider a struct like this
+/// struct Sample {
+///     id: u64,
+///     value: String,
+///     date: String,
+/// }
+/// We would like to index all of the fields so that we can search for the struct not only by ID
+/// but also by value and date.  When we index the fields individually in their own tables, it
+/// is important that we run all operations within a single transaction to ensure coherence of
+/// the indices
+/// This test features helper functions for reading and writing the parts of the struct.
+/// Note that the reader functions take `Readable` because they might run within a Read
+/// Transaction or a Write Transaction.  The test demonstrates fetching values via both.
+use rkv::{
+    MultiStore,
+    Readable,
+    Rkv,
+    SingleStore,
+    StoreOptions,
+    Value,
+    Writer,
+};
+
+use tempfile::Builder;
+
+use std::fs;
+
+#[test]
+fn read_many() {
+    let root = Builder::new().prefix("test_txns").tempdir().expect("tempdir");
+    fs::create_dir_all(root.path()).expect("dir created");
+    let k = Rkv::new(root.path()).expect("new succeeded");
+    let samplestore = k.open_single("s", StoreOptions::create()).expect("open");
+    let datestore = k.open_multi("m", StoreOptions::create()).expect("open");
+    let valuestore = k.open_multi("m", StoreOptions::create()).expect("open");
+
+    {
+        let mut writer = k.write().expect("env write lock");
+
+        for id in 0..30_u64 {
+            let value = format!("value{}", id);
+            let date = format!("2019-06-{}", id);
+            put_id_field(&mut writer, datestore, &date, id);
+            put_id_field(&mut writer, valuestore, &value, id);
+            put_sample(&mut writer, samplestore, id, &value);
+        }
+
+        // now we read in the same transaction
+        for id in 0..30_u64 {
+            let value = format!("value{}", id);
+            let date = format!("2019-06-{}", id);
+            let ids = get_ids_by_field(&writer, datestore, &date);
+            let ids2 = get_ids_by_field(&writer, valuestore, &value);
+            let samples = get_samples(&writer, samplestore, &ids);
+            let samples2 = get_samples(&writer, samplestore, &ids2);
+            println!("{:?}, {:?}", samples, samples2);
+        }
+    }
+
+    {
+        let reader = k.read().expect("env read lock");
+        for id in 0..30_u64 {
+            let value = format!("value{}", id);
+            let date = format!("2019-06-{}", id);
+            let ids = get_ids_by_field(&reader, datestore, &date);
+            let ids2 = get_ids_by_field(&reader, valuestore, &value);
+            let samples = get_samples(&reader, samplestore, &ids);
+            let samples2 = get_samples(&reader, samplestore, &ids2);
+            println!("{:?}, {:?}", samples, samples2);
+        }
+    }
+}
+
+fn get_ids_by_field<Txn: Readable>(txn: &Txn, store: MultiStore, field: &str) -> Vec<u64> {
+    store
+        .get(txn, field)
+        .expect("get iterator")
+        .map(|id| match id.expect("field") {
+            (_, Some(Value::U64(id))) => id,
+            _ => panic!("getting value in iter"),
+        })
+        .collect::<Vec<u64>>()
+}
+
+fn get_samples<Txn: Readable>(txn: &Txn, samplestore: SingleStore, ids: &[u64]) -> Vec<String> {
+    ids.iter()
+        .map(|id| {
+            let bytes = id.to_be_bytes();
+            match samplestore.get(txn, &bytes).expect("fetch sample") {
+                Some(Value::Str(sample)) => String::from(sample),
+                Some(_) => panic!("wrong type"),
+                None => panic!("no sample for this id!"),
+            }
+        })
+        .collect::<Vec<String>>()
+}
+
+fn put_sample(txn: &mut Writer, samplestore: SingleStore, id: u64, value: &str) {
+    let idbytes = id.to_be_bytes();
+    samplestore.put(txn, &idbytes, &Value::Str(value)).expect("put id");
+}
+
+fn put_id_field(txn: &mut Writer, store: MultiStore, field: &str, id: u64) {
+    store.put(txn, field, &Value::U64(id)).expect("put id");
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/.appveyor.yml
@@ -0,0 +1,38 @@
+environment:
+  matrix:
+    - TARGET: x86_64-pc-windows-msvc
+      TOOLCHAIN: stable
+    - TARGET: i686-pc-windows-msvc
+      TOOLCHAIN: stable
+    - TARGET: x86_64-pc-windows-msvc
+      TOOLCHAIN: beta
+    - TARGET: i686-pc-windows-msvc
+      TOOLCHAIN: beta
+    - TARGET: x86_64-pc-windows-msvc
+      TOOLCHAIN: nightly
+    - TARGET: i686-pc-windows-msvc
+      TOOLCHAIN: nightly
+
+install:
+  - curl -sSf -o rustup-init.exe https://win.rustup.rs/
+  - rustup-init.exe -y --default-host %TARGET% --default-toolchain %TOOLCHAIN%
+  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
+  - choco install make -y
+  - choco install mingw -y
+  - refreshenv
+  - rustc -Vv
+  - cargo -Vv
+  - make -v
+  - gcc -v
+
+# Disable AppVeyor's build phase, let 'cargo test' take care of the build
+build: false
+
+test_script:
+  - SET RUST_BACKTRACE=1
+  - cargo test --all --target %TARGET% --verbose
+  - cargo test --all --release --target %TARGET% --verbose
+
+cache:
+  - C:\Users\appveyor\.cargo\registry
+  - target
--- a/third_party/rust/rkv/.cargo-checksum.json
+++ b/third_party/rust/rkv/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.lock":"c95c530d76b891215cce4342a806bbc1747ab4d62f54330d932dafb542fa1a56","Cargo.toml":"00eb8afcb73a205013caf49fff1378a2304269f87f9d79beece7039f9bfb5ccf","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"e28eb7d26ddd6dd71e1757f4eab63044b5c430932ef3c3a24e9772ddc78ebf85","examples/README.md":"143767fc145bf167ce269a65138cb3f7086cb715b8bc4f73626da82966e646f4","examples/iterator.rs":"ddc3997e394a30ad82d78d2675a48c4617353f88b89bb9a3df5a3804d59b8ef9","examples/simple-store.rs":"cae63e39f2f98ee6ac2f387dcb02d6b929828a74f32f7d18d69c7fc9c3cce765","run-all-examples.sh":"7f9d11d01017f77e1c9d26e3e82dfca8c6930deaec85e864458e33a7fa267de0","src/bin/dump.rs":"da8543848e57893902751f4c4745e835b9c86263da2344af18d5717014f645f5","src/bin/rand.rs":"3da924fa0f1a118f606e2b94aee3a0553d9ebdbd17ee0152b85148adbf521bba","src/env.rs":"5deac6b35e49da1d47d7c852ed2e30ef96b6d15998fe7a79479cec64697626fc","src/error.rs":"f2cbab99691f36c98c24d297de3a303de258ddd3a06e2f54cb5efce20eb3740b","src/lib.rs":"4fe4e7d6a912a850b709ed23e372acd4f214890066322b4720376f7772bb776e","src/manager.rs":"ff2d76056e3a7200035b2e75c5bc2159f337e59c076dddd2476e3094b6ae3741","src/migrate.rs":"674cee0d027fc2eed3b09cebe686c837a97725099c967d8c2f49d19e793e6bfd","src/readwrite.rs":"fde695333e4845f4f53d63da6281f585919e2a3ac5cfe00d173cc139bc822763","src/store.rs":"409d13b1ea0d1254dae947ecbce50e741fb71c3ca118a78803b734336dce6a8f","src/store/integer.rs":"f386474c971f671c9b316a16ebff5b586be6837c886f443753ae13277a7e0070","src/store/integermulti.rs":"1a0912f97619297da31cc8c146e38941b88539d2857df81191a49c8dbd18625d","src/store/multi.rs":"2dec01c2202a2c9069cced4e1e42906b01d0b85df25d17e0ea810c05fa8395d0","src/store/single.rs":"c55c3600714f5ed9e820b16c2335ae00a0071174e0a32b9df89a34182a4b908c","src/value.rs":"7fae77a8291b951591e557ec694bfdadc9eb78557dad36a970cfcdcfb83fd238","tests/integer-store.rs":"f7e06c
71b0dead2323c7c61fc8bcbffbdd3a4796eebf6138db9cce3dbba716a3","tests/manager.rs":"97ec61145dc227f4f5fbcb6449c096bbe5b9a09db4e61ff4491c0443fe9adf26","tests/multi-integer-store.rs":"83295b0135c502321304aa06b05d5a9eeab41b1438ed7ddf2cb1a3613dfef4d9","tests/test_txn.rs":"f486d8bd485398e49ae64eac59ca3b44dfa7f8340aab17483cd3e9864fadd88b"},"package":"9aab7c645d32e977e186448b0a5c2c3139a91a7f630cfd8a8c314d1d145e78bf"}
\ No newline at end of file
+{"files":{".appveyor.yml":"053bc7a827d759dcdc6ef8a8299432c80bf5b2970167a0add2bbaa83e77d3c7d",".rustfmt.toml":"b484c99708d8cdb01be0ef680a15b8897112942d041fc656fc5622816b6b3412",".travis.yml":"8a452fdc9dc79c68f42917887eb9c71218511bb57f6ca28e9e0fbef6cec952f4","CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.toml":"2a04dc5fb3c6950f40057be05b9cb4eeff27e917045e7b078dc7583e00cfbe56","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"e28eb7d26ddd6dd71e1757f4eab63044b5c430932ef3c3a24e9772ddc78ebf85","examples/README.md":"143767fc145bf167ce269a65138cb3f7086cb715b8bc4f73626da82966e646f4","examples/iterator.rs":"4c38a8cea1dfecc79387b7f347a9938f2d3301b4d063dca4e940966650a777ec","examples/simple-store.rs":"c4d4a9ca0f2098b2bc0fa98c4c8ce814f2f143aa8e4fbacfdbd06f407d33ff63","run-all-examples.sh":"7f9d11d01017f77e1c9d26e3e82dfca8c6930deaec85e864458e33a7fa267de0","src/backend.rs":"eaab0a3216b8a35689fa3d38820a14323646d6fa1ba8c48d150786e6260978a2","src/backend/common.rs":"43e12f967ad73209c8c44da133765395eab661a7ae25d4db05acabade6c8bc9c","src/backend/impl_lmdb.rs":"2fa6da1aff5bbfc9915e6d14117ddb8580470d420f9167bcecd282f9afb3178d","src/backend/impl_lmdb/cursor.rs":"8e6ecadcdbafa67d7f12d06f049b16967459a13d4915c77dc49d29bd246ab2e3","src/backend/impl_lmdb/database.rs":"c52ab76a4389c525a998eef0302e709d57a22f2627a967b2246e98ae15f4a999","src/backend/impl_lmdb/environment.rs":"0477a913f9bbbfc98ec9be71925ce3eca27f46f5486934e5015d9a9b830713ec","src/backend/impl_lmdb/error.rs":"7536a64cd5ee776dbf32da9d7ffac0a8a2d2fb23301189771a6b315a29587b45","src/backend/impl_lmdb/flags.rs":"90b06968029e8d3533a3f4579f2c6737699b0d6556c23ba1413540496847cf20","src/backend/impl_lmdb/info.rs":"e44d9100c0acc179263f41c70d2d139faa1b19efe6948c951958986c5fc90dbf","src/backend/impl_lmdb/iter.rs":"a441d4053260899ac87cd58b2e6502a3688a5b968d3fc9615ac311448583c794","src/backend/impl_lmdb/stat.rs":"ec3100fee914cfe4805a7c588344c707c027bad7b37b43b
7455baa0772cb64f9","src/backend/impl_lmdb/transaction.rs":"306d77c9e3985e2de1592d2274c8e4be76dd0d453e1578c474bc8d82185f202e","src/backend/impl_safe.rs":"5f09bb3cb4ae3ae2fde44fb6aba1c431f3c18df1baa2f52e70ed318835962206","src/backend/impl_safe/cursor.rs":"7cd148f62c649f0a0baa7ecfa7e728ed0df70e6876220277b3613e6dd645c0a1","src/backend/impl_safe/database.rs":"94dbc134bcfecf77c2538e64c1ea47b06d692a9005753e19a4c0318fd5e5b815","src/backend/impl_safe/environment.rs":"bd718fa17a56230c7f8e000f07a7c389286f7127900d5157872f51db9bb17cda","src/backend/impl_safe/error.rs":"013c656e7a83fd98b6178ff2353ddb1752040eceffef3247fc9902cf7e8c3ac9","src/backend/impl_safe/flags.rs":"6a116c08a56b468e57e97138dfd541267e99bcb5b3e7ebe00686ddc46498c580","src/backend/impl_safe/info.rs":"c9dc67d989394dd7a25e99681e4a25f19b4ca3992eb18b719fb89742fae635b2","src/backend/impl_safe/iter.rs":"f4ffc303f7643d19179680ca8ded2714ed8aed4c65b2def5c53d9790b456a99f","src/backend/impl_safe/snapshot.rs":"a24af2719258909e2da9bdc36a93763b5188077af4307a78b89bc00ba4cddabe","src/backend/impl_safe/stat.rs":"77ea9937c2ff839cba4ed5597b4804550915d5c50fce0fc86133bf23cff49d95","src/backend/impl_safe/transaction.rs":"b4821a5affb8a4d1452d306fe6b888bdfe49e47150ed96b1d1786e9f3d34854d","src/backend/traits.rs":"364fef58437df806d9435521f407888c10f9d39bd52e379eca196f1d01a2074e","src/bin/dump.rs":"58b4f36fa1a51dd42099ee94043c1af09c7fe1091307634bc6e777c77185fec5","src/bin/rand.rs":"ca357b19caa142d5016fc4400c8016067a88ca1766346a4f5f0177fc4958a6af","src/env.rs":"3d4071ee02844d915580718b9f8ac24d8635131b3120b81279473b5899284fb8","src/error.rs":"22f0b0559790dd9d4ee46488c9518ffa07156df64851df1bef40549359bd7cb1","src/helpers.rs":"457923fd6b263cc749730e656ee887463cd13b3474d7f085e8779c0ae9d04420","src/lib.rs":"b6c0824064ddb18c8f08502c4f2530349273221850422322d74d110cf62ab271","src/manager.rs":"838f9bb1e2c00308f1317f2fedc0fc0dbe0d5213ab55e6e8cff6b63a42e9f614","src/migrate.rs":"674cee0d027fc2eed3b09cebe686c837a97725099c967d8c2f49d19e793e6bfd","src/readw
rite.rs":"a14226cc0f2a45b4d02d12dfb8399c4acf5278ca559d1e5a58542ccd27ba884a","src/store.rs":"f6758b4ee9c61ec98468b778cc19bd91b66945819ce5bba11e7b3a8ffe85090e","src/store/integer.rs":"cf8e10a1b8cc72a8d3b804b67a8a766f106f3a905e163424a14f88580103a619","src/store/integermulti.rs":"7c6561651f752d9636af8f296984775a67a2a4989ffc7f53228ca10633918a0b","src/store/keys.rs":"584bf897df7a0a10dd2841cf31cb693124748757902374d215b4d7caf79baae5","src/store/keys/encodables.rs":"d8b5dd6f49cab4a44b6f638c473ad372f589669e7ef9bd6935aa960840b95063","src/store/keys/primitives.rs":"f714e54dd6507e048cf867deecd15a5a452298b8255575aa9eb1c67317fff5dd","src/store/multi.rs":"63abe38990485a41000309fa6816835b455d1949cd3bdfff489eaf5be511a6e3","src/store/single.rs":"85f36530dd9aa362107f69a20c198325dd698c3211c2f264669b7d170397f1a0","src/value.rs":"73221baf6b9dc458840014b9f6a36862a718e9c95f79a905f4ecc044761aa3fa","tests/integer-store.rs":"94d3bbb33be98cd917eae19812de6c735b9eb7bda2cba31e49d96c553d077e40","tests/manager.rs":"b2cecbd47ceebdbb9decf94b390b76ef91d2ce6025de78a4c4e25b073f450bcf","tests/multi-integer-store.rs":"d542e7f338cdac61e2e11b1f88581e0813ea453b219ee9ee78507894cd6359d8","tests/test_txn.rs":"4f20a4a1e39c48f26776a6b1b33927c47286be9e041db25b70ae2b2b0af5cd0f"},"package":null}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/.rustfmt.toml
@@ -0,0 +1,4 @@
+imports_layout = "Vertical"
+max_width = 120
+match_block_trailing_comma = true
+use_small_heuristics = "Off"
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/.travis.yml
@@ -0,0 +1,48 @@
+language: rust
+sudo: false
+
+cache: cargo
+
+rust:
+  - 1.37.0
+  - stable
+  - beta
+  - nightly
+
+os:
+  - linux
+  - osx
+
+matrix:
+  allow_failures:
+    - rust: nightly
+  fast_finish: true
+
+before_script:
+  # We install a known-to-have-rustfmt version of the nightly toolchain
+  # in order to run the nightly version of rustfmt, which supports rules
+  # that we depend upon. When updating, pick a suitable nightly version
+  # from https://rust-lang.github.io/rustup-components-history/
+  - rustup toolchain install nightly-2019-09-11
+  - rustup component add rustfmt --toolchain nightly-2019-09-11
+  - rustup component add clippy --toolchain nightly-2019-09-11
+  # Use official clang in order to test out building on osx.
+  - if [[ "$TRAVIS_OS_NAME" = "osx" ]]; then
+      brew update;
+      brew install llvm;
+      export PATH="/usr/local/opt/llvm/bin:$PATH";
+      export LDFLAGS="-L/usr/local/opt/llvm/lib";
+      export CPPFLAGS="-I/usr/local/opt/llvm/include";
+    fi
+
+script:
+  - cargo +nightly-2019-09-11 fmt --all -- --check
+  - CC="clang" cargo +nightly-2019-09-11 clippy --all-features -- -D warnings
+  - cargo build --verbose
+  - export RUST_BACKTRACE=1
+  - cargo test --all --verbose
+  - cargo test --lib --no-default-features --verbose
+  - cargo test --lib --no-default-features --features "db-dup-sort" --verbose
+  - cargo test --lib --no-default-features --features "db-int-key" --verbose
+  - cargo test --release --all --verbose
+  - ./run-all-examples.sh
--- a/third_party/rust/rkv/Cargo.toml
+++ b/third_party/rust/rkv/Cargo.toml
@@ -1,75 +1,49 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
-#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
-
 [package]
-edition = "2018"
 name = "rkv"
-version = "0.10.2"
+version = "0.11.0"
 authors = ["Richard Newman <rnewman@twinql.com>", "Nan Jiang <najiang@mozilla.com>", "Myk Melez <myk@mykzilla.org>", "Victor Porof <vporof@mozilla.com>"]
-exclude = ["/tests/envs/*"]
+edition = "2018"
+license = "Apache-2.0"
 description = "a simple, humane, typed Rust interface to LMDB"
+documentation = "https://docs.rs/rkv"
 homepage = "https://github.com/mozilla/rkv"
-documentation = "https://docs.rs/rkv"
+repository = "https://github.com/mozilla/rkv"
 readme = "README.md"
 keywords = ["lmdb", "database", "storage"]
 categories = ["database"]
-license = "Apache-2.0"
-repository = "https://github.com/mozilla/rkv"
-[dependencies.arrayref]
-version = "0.3"
-
-[dependencies.bincode]
-version = "1.0"
-
-[dependencies.bitflags]
-version = "1"
-
-[dependencies.byteorder]
-version = "1"
-
-[dependencies.failure]
-version = "0.1"
-features = ["derive"]
-default_features = false
-
-[dependencies.lazy_static]
-version = "1.0"
-
-[dependencies.lmdb-rkv]
-version = "0.12.3"
-
-[dependencies.ordered-float]
-version = "1.0"
-
-[dependencies.serde]
-version = "1.0"
-
-[dependencies.serde_derive]
-version = "1.0"
-
-[dependencies.url]
-version = "2.0"
-
-[dependencies.uuid]
-version = "0.7"
-[dev-dependencies.byteorder]
-version = "1"
-
-[dev-dependencies.tempfile]
-version = "3"
+exclude = ["/tests/envs/*"]
 
 [features]
+default = ["db-dup-sort", "db-int-key"]
 backtrace = ["failure/backtrace", "failure/std"]
-default = []
+db-dup-sort = []
+db-int-key = []
 with-asan = ["lmdb-rkv/with-asan"]
 with-fuzzer = ["lmdb-rkv/with-fuzzer"]
 with-fuzzer-no-link = ["lmdb-rkv/with-fuzzer-no-link"]
+
+[dependencies]
+arrayref = "0.3"
+bincode = "1.0"
+bitflags = "1"
+byteorder = "1"
+id-arena = "2.2"
+lazy_static = "1.0"
+lmdb-rkv = "0.12.3"
+log = "0.4"
+ordered-float = "1.0"
+uuid = "0.7"
+serde = { version = "1.0", features = ["derive", "rc"] }
+serde_derive = "1.0"
+url = "2.0"
+
+# Get rid of failure's dependency on backtrace. Eventually
+# backtrace will move into Rust core, but we don't need it here.
+[dependencies.failure]
+version = "0.1"
+default_features = false
+features = ["derive"]
+
+[dev-dependencies]
+byteorder = "1"
+tempfile = "3"
--- a/third_party/rust/rkv/examples/iterator.rs
+++ b/third_party/rust/rkv/examples/iterator.rs
@@ -2,35 +2,42 @@
 // http://creativecommons.org/publicdomain/zero/1.0/
 
 //! A demo that showcases the basic usage of iterators in rkv.
 //!
 //! You can test this out by running:
 //!
 //!     cargo run --example iterator
 
+use std::fs;
+use std::str;
+
+use tempfile::Builder;
+
+use rkv::backend::{
+    Lmdb,
+    LmdbDatabase,
+    LmdbEnvironment,
+};
 use rkv::{
     Manager,
     Rkv,
     SingleStore,
     StoreError,
     StoreOptions,
     Value,
 };
-use tempfile::Builder;
-
-use std::fs;
-use std::str;
 
 fn main() {
     let root = Builder::new().prefix("iterator").tempdir().unwrap();
     fs::create_dir_all(root.path()).unwrap();
     let p = root.path();
 
-    let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new).unwrap();
+    let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
+    let created_arc = manager.get_or_create(p, Rkv::new::<Lmdb>).unwrap();
     let k = created_arc.read().unwrap();
     let store = k.open_single("store", StoreOptions::create()).unwrap();
 
     populate_store(&k, store).unwrap();
 
     let reader = k.read().unwrap();
 
     println!("Iterating from the beginning...");
@@ -53,17 +60,17 @@ fn main() {
     println!();
     println!("Iterating from the given prefix...");
     let mut iter = store.iter_from(&reader, "Un").unwrap();
     while let Some(Ok((country, city))) = iter.next() {
         println!("{}, {:?}", str::from_utf8(country).unwrap(), city);
     }
 }
 
-fn populate_store(k: &Rkv, store: SingleStore) -> Result<(), StoreError> {
+fn populate_store(k: &Rkv<LmdbEnvironment>, store: SingleStore<LmdbDatabase>) -> Result<(), StoreError> {
     let mut writer = k.write()?;
     for (country, city) in vec![
         ("Canada", Value::Str("Ottawa")),
         ("United States of America", Value::Str("Washington")),
         ("Germany", Value::Str("Berlin")),
         ("France", Value::Str("Paris")),
         ("Italy", Value::Str("Rome")),
         ("United Kingdom", Value::Str("London")),
--- a/third_party/rust/rkv/examples/simple-store.rs
+++ b/third_party/rust/rkv/examples/simple-store.rs
@@ -2,27 +2,36 @@
 // http://creativecommons.org/publicdomain/zero/1.0/
 
 //! A simple rkv demo that showcases the basic usage (put/get/delete) of rkv.
 //!
 //! You can test this out by running:
 //!
 //!     cargo run --example simple-store
 
+use std::fs;
+
+use tempfile::Builder;
+
+use rkv::backend::{
+    BackendStat,
+    Lmdb,
+    LmdbDatabase,
+    LmdbEnvironment,
+    LmdbRwTransaction,
+};
 use rkv::{
     Manager,
-    MultiStore,
     Rkv,
     StoreOptions,
     Value,
-    Writer,
 };
-use tempfile::Builder;
 
-use std::fs;
+type MultiStore = rkv::MultiStore<LmdbDatabase>;
+type Writer<'env> = rkv::Writer<LmdbRwTransaction<'env>>;
 
 fn getput<'env, 's>(store: MultiStore, writer: &'env mut Writer, ids: &'s mut Vec<String>) {
     let keys = vec!["str1", "str2", "str3"];
     // we convert the writer into a cursor so that we can safely read
     for k in keys.iter() {
         // this is a multi-valued database, so get returns an iterator
         let mut iter = store.get(writer, k).unwrap();
         while let Some(Ok((_key, val))) = iter.next() {
@@ -48,22 +57,22 @@ fn delete(store: MultiStore, writer: &mu
 }
 
 fn main() {
     let root = Builder::new().prefix("simple-db").tempdir().unwrap();
     fs::create_dir_all(root.path()).unwrap();
     let p = root.path();
 
     // The manager enforces that each process opens the same lmdb environment at most once
-    let created_arc = Manager::singleton().write().unwrap().get_or_create(p, Rkv::new).unwrap();
+    let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
+    let created_arc = manager.get_or_create(p, Rkv::new::<Lmdb>).unwrap();
     let k = created_arc.read().unwrap();
 
     // Creates a store called "store"
     let store = k.open_single("store", StoreOptions::create()).unwrap();
-
     let multistore = k.open_multi("multistore", StoreOptions::create()).unwrap();
 
     println!("Inserting data...");
     {
         // Use a writer to mutate the store
         let mut writer = k.write().unwrap();
         store.put(&mut writer, "int", &Value::I64(1234)).unwrap();
         store.put(&mut writer, "uint", &Value::U64(1234_u64)).unwrap();
@@ -90,16 +99,17 @@ fn main() {
         multistore.put(&mut writer, "str3", &Value::Str("string ocho")).unwrap();
         multistore.put(&mut writer, "str3", &Value::Str("string nueve")).unwrap();
         getput(multistore, &mut writer, &mut ids);
         writer.commit().unwrap();
         let mut writer = k.write().unwrap();
         delete(multistore, &mut writer);
         writer.commit().unwrap();
     }
+
     println!("Looking up keys...");
     {
         // Use a reader to query the store
         let reader = k.read().unwrap();
         println!("Get int {:?}", store.get(&reader, "int").unwrap());
         println!("Get uint {:?}", store.get(&reader, "uint").unwrap());
         println!("Get float {:?}", store.get(&reader, "float").unwrap());
         println!("Get instant {:?}", store.get(&reader, "instant").unwrap());
@@ -174,10 +184,11 @@ fn main() {
         store.put(&mut writer, "foo", &Value::Str("bar")).unwrap();
         another_store.put(&mut writer, "foo", &Value::Str("baz")).unwrap();
         writer.commit().unwrap();
 
         let reader = k.read().unwrap();
         println!("Get from store value: {:?}", store.get(&reader, "foo").unwrap());
         println!("Get from another store value: {:?}", another_store.get(&reader, "foo").unwrap());
     }
+
     println!("Environment statistics: btree depth = {}", k.stat().unwrap().depth());
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend.rs
@@ -0,0 +1,33 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+mod common;
+mod impl_lmdb;
+mod impl_safe;
+mod traits;
+
+pub use common::*;
+pub use traits::*;
+
+pub use impl_lmdb::DatabaseImpl as LmdbDatabase;
+pub use impl_lmdb::EnvironmentBuilderImpl as Lmdb;
+pub use impl_lmdb::EnvironmentImpl as LmdbEnvironment;
+pub use impl_lmdb::ErrorImpl as LmdbError;
+pub use impl_lmdb::RoCursorImpl as LmdbRoCursor;
+pub use impl_lmdb::RoTransactionImpl as LmdbRoTransaction;
+pub use impl_lmdb::RwTransactionImpl as LmdbRwTransaction;
+
+pub use impl_safe::DatabaseId as SafeModeDatabase;
+pub use impl_safe::EnvironmentBuilderImpl as SafeMode;
+pub use impl_safe::EnvironmentImpl as SafeModeEnvironment;
+pub use impl_safe::ErrorImpl as SafeModeError;
+pub use impl_safe::RoCursorImpl as SafeModeRoCursor;
+pub use impl_safe::RoTransactionImpl as SafeModeRoTransaction;
+pub use impl_safe::RwTransactionImpl as SafeModeRwTransaction;
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/common.rs
@@ -0,0 +1,43 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+#![allow(non_camel_case_types)]
+
+pub enum EnvironmentFlags {
+    FIXED_MAP,
+    NO_SUB_DIR,
+    WRITE_MAP,
+    READ_ONLY,
+    NO_META_SYNC,
+    NO_SYNC,
+    MAP_ASYNC,
+    NO_TLS,
+    NO_LOCK,
+    NO_READAHEAD,
+    NO_MEM_INIT,
+}
+
+pub enum DatabaseFlags {
+    REVERSE_KEY,
+    #[cfg(feature = "db-dup-sort")]
+    DUP_SORT,
+    #[cfg(feature = "db-int-key")]
+    INTEGER_KEY,
+    DUP_FIXED,
+    INTEGER_DUP,
+    REVERSE_DUP,
+}
+
+pub enum WriteFlags {
+    NO_OVERWRITE,
+    NO_DUP_DATA,
+    CURRENT,
+    APPEND,
+    APPEND_DUP,
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb.rs
@@ -0,0 +1,42 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+mod cursor;
+mod database;
+mod environment;
+mod error;
+mod flags;
+mod info;
+mod iter;
+mod stat;
+mod transaction;
+
+pub use cursor::{
+    RoCursorImpl,
+    RwCursorImpl,
+};
+pub use database::DatabaseImpl;
+pub use environment::{
+    EnvironmentBuilderImpl,
+    EnvironmentImpl,
+};
+pub use error::ErrorImpl;
+pub use flags::{
+    DatabaseFlagsImpl,
+    EnvironmentFlagsImpl,
+    WriteFlagsImpl,
+};
+pub use info::InfoImpl;
+pub use iter::IterImpl;
+pub use stat::StatImpl;
+pub use transaction::{
+    RoTransactionImpl,
+    RwTransactionImpl,
+};
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/cursor.rs
@@ -0,0 +1,69 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use lmdb::Cursor;
+
+use super::IterImpl;
+use crate::backend::traits::BackendRoCursor;
+
+/// Read-only cursor for the LMDB backend: a thin newtype over
+/// `lmdb::RoCursor`.
+#[derive(Debug)]
+pub struct RoCursorImpl<'env>(pub(crate) lmdb::RoCursor<'env>);
+
+impl<'env> BackendRoCursor<'env> for RoCursorImpl<'env> {
+    type Iter = IterImpl<'env, lmdb::RoCursor<'env>>;
+
+    // Consumes the cursor, yielding all pairs from the start of the database.
+    fn into_iter(self) -> Self::Iter {
+        // We call RoCursor.iter() instead of RoCursor.iter_start() because
+        // the latter panics when there are no items in the store, whereas the
+        // former returns an iterator that yields no items. And since we create
+        // the Cursor and don't change its position, we can be sure that a call
+        // to Cursor.iter() will start at the beginning.
+        IterImpl::new(self.0, lmdb::RoCursor::iter)
+    }
+
+    // Consumes the cursor, iterating from `key` onward.
+    fn into_iter_from<K>(self, key: K) -> Self::Iter
+    where
+        K: AsRef<[u8]>,
+    {
+        IterImpl::new(self.0, |cursor| cursor.iter_from(key))
+    }
+
+    // Consumes the cursor, iterating over the duplicate values stored under
+    // `key` (meaningful for DUP_SORT databases).
+    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
+    where
+        K: AsRef<[u8]>,
+    {
+        IterImpl::new(self.0, |cursor| cursor.iter_dup_of(key))
+    }
+}
+
+/// Read-write cursor for the LMDB backend: a thin newtype over
+/// `lmdb::RwCursor`. Only the read-only cursor trait is implemented here;
+/// the iteration methods mirror `RoCursorImpl` (without the empty-store
+/// caveat comment, but the same `iter` vs `iter_start` reasoning applies).
+#[derive(Debug)]
+pub struct RwCursorImpl<'env>(pub(crate) lmdb::RwCursor<'env>);
+
+impl<'env> BackendRoCursor<'env> for RwCursorImpl<'env> {
+    type Iter = IterImpl<'env, lmdb::RwCursor<'env>>;
+
+    fn into_iter(self) -> Self::Iter {
+        IterImpl::new(self.0, lmdb::RwCursor::iter)
+    }
+
+    fn into_iter_from<K>(self, key: K) -> Self::Iter
+    where
+        K: AsRef<[u8]>,
+    {
+        IterImpl::new(self.0, |cursor| cursor.iter_from(key))
+    }
+
+    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
+    where
+        K: AsRef<[u8]>,
+    {
+        IterImpl::new(self.0, |cursor| cursor.iter_dup_of(key))
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/database.rs
@@ -0,0 +1,16 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use crate::backend::traits::BackendDatabase;
+
+/// Handle to an open LMDB database: a copyable newtype over `lmdb::Database`.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct DatabaseImpl(pub(crate) lmdb::Database);
+
+impl BackendDatabase for DatabaseImpl {}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/environment.rs
@@ -0,0 +1,115 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use std::path::Path;
+
+use super::{
+    DatabaseFlagsImpl,
+    DatabaseImpl,
+    EnvironmentFlagsImpl,
+    ErrorImpl,
+    InfoImpl,
+    RoTransactionImpl,
+    RwTransactionImpl,
+    StatImpl,
+};
+use crate::backend::traits::{
+    BackendEnvironment,
+    BackendEnvironmentBuilder,
+};
+
+/// Builder for an LMDB environment: a newtype over `lmdb::EnvironmentBuilder`
+/// that forwards every setting to the wrapped builder.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub struct EnvironmentBuilderImpl(lmdb::EnvironmentBuilder);
+
+impl<'env> BackendEnvironmentBuilder<'env> for EnvironmentBuilderImpl {
+    type Error = ErrorImpl;
+    type Environment = EnvironmentImpl;
+    type Flags = EnvironmentFlagsImpl;
+
+    fn new() -> EnvironmentBuilderImpl {
+        EnvironmentBuilderImpl(lmdb::Environment::new())
+    }
+
+    // Converts backend-agnostic flags into `lmdb::EnvironmentFlags`.
+    fn set_flags<T>(&mut self, flags: T) -> &mut Self
+    where
+        T: Into<Self::Flags>,
+    {
+        self.0.set_flags(flags.into().0);
+        self
+    }
+
+    fn set_max_readers(&mut self, max_readers: u32) -> &mut Self {
+        self.0.set_max_readers(max_readers);
+        self
+    }
+
+    fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self {
+        self.0.set_max_dbs(max_dbs);
+        self
+    }
+
+    fn set_map_size(&mut self, size: usize) -> &mut Self {
+        self.0.set_map_size(size);
+        self
+    }
+
+    // Opens the environment at `path`, wrapping the result/error in the
+    // backend newtypes.
+    fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> {
+        self.0.open(path).map(EnvironmentImpl).map_err(ErrorImpl)
+    }
+}
+
+/// An open LMDB environment: a newtype over `lmdb::Environment`. All trait
+/// methods delegate to the wrapped environment, mapping results and errors
+/// into the backend newtypes.
+#[derive(Debug)]
+pub struct EnvironmentImpl(lmdb::Environment);
+
+impl<'env> BackendEnvironment<'env> for EnvironmentImpl {
+    type Error = ErrorImpl;
+    type Database = DatabaseImpl;
+    type Flags = DatabaseFlagsImpl;
+    type Stat = StatImpl;
+    type Info = InfoImpl;
+    type RoTransaction = RoTransactionImpl<'env>;
+    type RwTransaction = RwTransactionImpl<'env>;
+
+    fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> {
+        self.0.open_db(name).map(DatabaseImpl).map_err(ErrorImpl)
+    }
+
+    fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error> {
+        self.0.create_db(name, flags.0).map(DatabaseImpl).map_err(ErrorImpl)
+    }
+
+    fn begin_ro_txn(&'env self) -> Result<Self::RoTransaction, Self::Error> {
+        self.0.begin_ro_txn().map(RoTransactionImpl).map_err(ErrorImpl)
+    }
+
+    fn begin_rw_txn(&'env self) -> Result<Self::RwTransaction, Self::Error> {
+        self.0.begin_rw_txn().map(RwTransactionImpl).map_err(ErrorImpl)
+    }
+
+    // Flushes buffers to disk; `force` requests a synchronous flush.
+    fn sync(&self, force: bool) -> Result<(), Self::Error> {
+        self.0.sync(force).map_err(ErrorImpl)
+    }
+
+    fn stat(&self) -> Result<Self::Stat, Self::Error> {
+        self.0.stat().map(StatImpl).map_err(ErrorImpl)
+    }
+
+    fn info(&self) -> Result<Self::Info, Self::Error> {
+        self.0.info().map(InfoImpl).map_err(ErrorImpl)
+    }
+
+    fn freelist(&self) -> Result<usize, Self::Error> {
+        self.0.freelist().map_err(ErrorImpl)
+    }
+
+    fn set_map_size(&self, size: usize) -> Result<(), Self::Error> {
+        self.0.set_map_size(size).map_err(ErrorImpl)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/error.rs
@@ -0,0 +1,35 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use std::fmt;
+
+use crate::backend::traits::BackendError;
+use crate::error::StoreError;
+
+/// Error type for the LMDB backend: a newtype over `lmdb::Error`.
+#[derive(Debug)]
+pub struct ErrorImpl(pub(crate) lmdb::Error);
+
+impl BackendError for ErrorImpl {}
+
+impl fmt::Display for ErrorImpl {
+    // Delegates formatting to the wrapped `lmdb::Error`.
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        self.0.fmt(fmt)
+    }
+}
+
+// NOTE(review): implementing `From<ErrorImpl> for StoreError` would be more
+// idiomatic than `Into` (the blanket impl then provides `Into` for free).
+impl Into<StoreError> for ErrorImpl {
+    fn into(self) -> StoreError {
+        // Map the LMDB errors that have dedicated `StoreError` variants;
+        // everything else is carried through as `StoreError::LmdbError`.
+        match self.0 {
+            lmdb::Error::NotFound => StoreError::KeyValuePairNotFound,
+            lmdb::Error::Invalid => StoreError::DatabaseInvalid,
+            _ => StoreError::LmdbError(self.0),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/flags.rs
@@ -0,0 +1,129 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use crate::backend::common::{
+    DatabaseFlags,
+    EnvironmentFlags,
+    WriteFlags,
+};
+use crate::backend::traits::{
+    BackendDatabaseFlags,
+    BackendEnvironmentFlags,
+    BackendFlags,
+    BackendWriteFlags,
+};
+
+/// Environment flags for the LMDB backend: a newtype over
+/// `lmdb::EnvironmentFlags`.
+#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
+pub struct EnvironmentFlagsImpl(pub(crate) lmdb::EnvironmentFlags);
+
+impl BackendFlags for EnvironmentFlagsImpl {
+    // An empty flag set.
+    fn empty() -> EnvironmentFlagsImpl {
+        EnvironmentFlagsImpl(lmdb::EnvironmentFlags::empty())
+    }
+}
+
+impl BackendEnvironmentFlags for EnvironmentFlagsImpl {
+    // Sets or clears a single backend-agnostic flag.
+    fn set(&mut self, flag: EnvironmentFlags, value: bool) {
+        self.0.set(flag.into(), value)
+    }
+}
+
+// NOTE(review): `From` impls would be more idiomatic than `Into` here.
+impl Into<EnvironmentFlagsImpl> for EnvironmentFlags {
+    fn into(self) -> EnvironmentFlagsImpl {
+        EnvironmentFlagsImpl(self.into())
+    }
+}
+
+// Maps each backend-agnostic flag to its LMDB counterpart, one-to-one.
+impl Into<lmdb::EnvironmentFlags> for EnvironmentFlags {
+    fn into(self) -> lmdb::EnvironmentFlags {
+        match self {
+            EnvironmentFlags::FIXED_MAP => lmdb::EnvironmentFlags::FIXED_MAP,
+            EnvironmentFlags::NO_SUB_DIR => lmdb::EnvironmentFlags::NO_SUB_DIR,
+            EnvironmentFlags::WRITE_MAP => lmdb::EnvironmentFlags::WRITE_MAP,
+            EnvironmentFlags::READ_ONLY => lmdb::EnvironmentFlags::READ_ONLY,
+            EnvironmentFlags::NO_META_SYNC => lmdb::EnvironmentFlags::NO_META_SYNC,
+            EnvironmentFlags::NO_SYNC => lmdb::EnvironmentFlags::NO_SYNC,
+            EnvironmentFlags::MAP_ASYNC => lmdb::EnvironmentFlags::MAP_ASYNC,
+            EnvironmentFlags::NO_TLS => lmdb::EnvironmentFlags::NO_TLS,
+            EnvironmentFlags::NO_LOCK => lmdb::EnvironmentFlags::NO_LOCK,
+            EnvironmentFlags::NO_READAHEAD => lmdb::EnvironmentFlags::NO_READAHEAD,
+            EnvironmentFlags::NO_MEM_INIT => lmdb::EnvironmentFlags::NO_MEM_INIT,
+        }
+    }
+}
+
+/// Database flags for the LMDB backend: a newtype over `lmdb::DatabaseFlags`.
+#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
+pub struct DatabaseFlagsImpl(pub(crate) lmdb::DatabaseFlags);
+
+impl BackendFlags for DatabaseFlagsImpl {
+    // An empty flag set.
+    fn empty() -> DatabaseFlagsImpl {
+        DatabaseFlagsImpl(lmdb::DatabaseFlags::empty())
+    }
+}
+
+impl BackendDatabaseFlags for DatabaseFlagsImpl {
+    // Sets or clears a single backend-agnostic flag.
+    fn set(&mut self, flag: DatabaseFlags, value: bool) {
+        self.0.set(flag.into(), value)
+    }
+}
+
+impl Into<DatabaseFlagsImpl> for DatabaseFlags {
+    fn into(self) -> DatabaseFlagsImpl {
+        DatabaseFlagsImpl(self.into())
+    }
+}
+
+// Maps each backend-agnostic flag to its LMDB counterpart; the feature-gated
+// arms match the gating on the `DatabaseFlags` enum itself.
+impl Into<lmdb::DatabaseFlags> for DatabaseFlags {
+    fn into(self) -> lmdb::DatabaseFlags {
+        match self {
+            DatabaseFlags::REVERSE_KEY => lmdb::DatabaseFlags::REVERSE_KEY,
+            #[cfg(feature = "db-dup-sort")]
+            DatabaseFlags::DUP_SORT => lmdb::DatabaseFlags::DUP_SORT,
+            #[cfg(feature = "db-int-key")]
+            DatabaseFlags::INTEGER_KEY => lmdb::DatabaseFlags::INTEGER_KEY,
+            DatabaseFlags::DUP_FIXED => lmdb::DatabaseFlags::DUP_FIXED,
+            DatabaseFlags::INTEGER_DUP => lmdb::DatabaseFlags::INTEGER_DUP,
+            DatabaseFlags::REVERSE_DUP => lmdb::DatabaseFlags::REVERSE_DUP,
+        }
+    }
+}
+
+/// Write flags for the LMDB backend: a newtype over `lmdb::WriteFlags`.
+#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
+pub struct WriteFlagsImpl(pub(crate) lmdb::WriteFlags);
+
+impl BackendFlags for WriteFlagsImpl {
+    // An empty flag set.
+    fn empty() -> WriteFlagsImpl {
+        WriteFlagsImpl(lmdb::WriteFlags::empty())
+    }
+}
+
+impl BackendWriteFlags for WriteFlagsImpl {
+    // Sets or clears a single backend-agnostic flag.
+    fn set(&mut self, flag: WriteFlags, value: bool) {
+        self.0.set(flag.into(), value)
+    }
+}
+
+impl Into<WriteFlagsImpl> for WriteFlags {
+    fn into(self) -> WriteFlagsImpl {
+        WriteFlagsImpl(self.into())
+    }
+}
+
+// Maps each backend-agnostic flag to its LMDB counterpart, one-to-one.
+impl Into<lmdb::WriteFlags> for WriteFlags {
+    fn into(self) -> lmdb::WriteFlags {
+        match self {
+            WriteFlags::NO_OVERWRITE => lmdb::WriteFlags::NO_OVERWRITE,
+            WriteFlags::NO_DUP_DATA => lmdb::WriteFlags::NO_DUP_DATA,
+            WriteFlags::CURRENT => lmdb::WriteFlags::CURRENT,
+            WriteFlags::APPEND => lmdb::WriteFlags::APPEND,
+            WriteFlags::APPEND_DUP => lmdb::WriteFlags::APPEND_DUP,
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/info.rs
@@ -0,0 +1,35 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use crate::backend::traits::BackendInfo;
+
+/// Environment info for the LMDB backend: a newtype over `lmdb::Info`.
+/// Reader counts are widened to `usize` to satisfy the trait's signatures.
+pub struct InfoImpl(pub(crate) lmdb::Info);
+
+impl BackendInfo for InfoImpl {
+    fn map_size(&self) -> usize {
+        self.0.map_size()
+    }
+
+    fn last_pgno(&self) -> usize {
+        self.0.last_pgno()
+    }
+
+    fn last_txnid(&self) -> usize {
+        self.0.last_txnid()
+    }
+
+    fn max_readers(&self) -> usize {
+        self.0.max_readers() as usize
+    }
+
+    fn num_readers(&self) -> usize {
+        self.0.num_readers() as usize
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/iter.rs
@@ -0,0 +1,41 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use super::ErrorImpl;
+use crate::backend::traits::BackendIter;
+
+/// Iterator over key/value pairs produced from an LMDB cursor. Owns the
+/// cursor so that it stays alive for the iterator's whole lifetime.
+pub struct IterImpl<'env, C> {
+    // LMDB semantics dictate that a cursor must be valid for the entire lifetime
+    // of an iterator. In other words, cursors must not be dropped while an
+    // iterator built from it is alive. Unfortunately, the LMDB crate API does
+    // not express this through the type system, so we must enforce it somehow.
+    #[allow(dead_code)]
+    cursor: C,
+    iter: lmdb::Iter<'env>,
+}
+
+impl<'env, C> IterImpl<'env, C> {
+    // Builds the iterator from `cursor` via `to_iter`, then stores both so the
+    // cursor outlives the iterator (see the `cursor` field comment).
+    pub(crate) fn new(mut cursor: C, to_iter: impl FnOnce(&mut C) -> lmdb::Iter<'env>) -> IterImpl<'env, C> {
+        let iter = to_iter(&mut cursor);
+        IterImpl {
+            cursor,
+            iter,
+        }
+    }
+}
+
+impl<'env, C> BackendIter<'env> for IterImpl<'env, C> {
+    type Error = ErrorImpl;
+
+    // Yields the next pair, wrapping any LMDB error in `ErrorImpl`.
+    #[allow(clippy::type_complexity)]
+    fn next(&mut self) -> Option<Result<(&'env [u8], &'env [u8]), Self::Error>> {
+        self.iter.next().map(|e| e.map_err(ErrorImpl))
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/stat.rs
@@ -0,0 +1,39 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use crate::backend::traits::BackendStat;
+
+/// Environment statistics for the LMDB backend: a newtype over `lmdb::Stat`.
+/// `page_size` and `depth` are widened to `usize` for the trait's signatures.
+pub struct StatImpl(pub(crate) lmdb::Stat);
+
+impl BackendStat for StatImpl {
+    fn page_size(&self) -> usize {
+        self.0.page_size() as usize
+    }
+
+    fn depth(&self) -> usize {
+        self.0.depth() as usize
+    }
+
+    fn branch_pages(&self) -> usize {
+        self.0.branch_pages()
+    }
+
+    fn leaf_pages(&self) -> usize {
+        self.0.leaf_pages()
+    }
+
+    fn overflow_pages(&self) -> usize {
+        self.0.overflow_pages()
+    }
+
+    fn entries(&self) -> usize {
+        self.0.entries()
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_lmdb/transaction.rs
@@ -0,0 +1,95 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use lmdb::Transaction;
+
+use super::{
+    DatabaseImpl,
+    ErrorImpl,
+    RoCursorImpl,
+    WriteFlagsImpl,
+};
+use crate::backend::traits::{
+    BackendRoCursorTransaction,
+    BackendRoTransaction,
+    BackendRwCursorTransaction,
+    BackendRwTransaction,
+};
+
+/// Read-only transaction for the LMDB backend: a newtype over
+/// `lmdb::RoTransaction`.
+#[derive(Debug)]
+pub struct RoTransactionImpl<'env>(pub(crate) lmdb::RoTransaction<'env>);
+
+impl<'env> BackendRoTransaction for RoTransactionImpl<'env> {
+    type Error = ErrorImpl;
+    type Database = DatabaseImpl;
+
+    // Fetches the value stored under `key`, borrowed from the transaction.
+    fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
+        self.0.get(db.0, &key).map_err(ErrorImpl)
+    }
+
+    // Ends the transaction, discarding it.
+    fn abort(self) {
+        self.0.abort()
+    }
+}
+
+impl<'env> BackendRoCursorTransaction<'env> for RoTransactionImpl<'env> {
+    type RoCursor = RoCursorImpl<'env>;
+
+    fn open_ro_cursor(&'env self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
+        self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl)
+    }
+}
+
+/// Read-write transaction for the LMDB backend: a newtype over
+/// `lmdb::RwTransaction`.
+#[derive(Debug)]
+pub struct RwTransactionImpl<'env>(pub(crate) lmdb::RwTransaction<'env>);
+
+impl<'env> BackendRwTransaction for RwTransactionImpl<'env> {
+    type Error = ErrorImpl;
+    type Database = DatabaseImpl;
+    type Flags = WriteFlagsImpl;
+
+    fn get(&self, db: &Self::Database, key: &[u8]) -> Result<&[u8], Self::Error> {
+        self.0.get(db.0, &key).map_err(ErrorImpl)
+    }
+
+    fn put(&mut self, db: &Self::Database, key: &[u8], value: &[u8], flags: Self::Flags) -> Result<(), Self::Error> {
+        self.0.put(db.0, &key, &value, flags.0).map_err(ErrorImpl)
+    }
+
+    // Without dup-sort support, delete removes the key unconditionally.
+    #[cfg(not(feature = "db-dup-sort"))]
+    fn del(&mut self, db: &Self::Database, key: &[u8]) -> Result<(), Self::Error> {
+        self.0.del(db.0, &key, None).map_err(ErrorImpl)
+    }
+
+    // With dup-sort support, an optional `value` selects which duplicate
+    // under `key` to delete; `None` deletes all of them.
+    #[cfg(feature = "db-dup-sort")]
+    fn del(&mut self, db: &Self::Database, key: &[u8], value: Option<&[u8]>) -> Result<(), Self::Error> {
+        self.0.del(db.0, &key, value).map_err(ErrorImpl)
+    }
+
+    // Removes all key/value pairs from the database.
+    fn clear_db(&mut self, db: &Self::Database) -> Result<(), Self::Error> {
+        self.0.clear_db(db.0).map_err(ErrorImpl)
+    }
+
+    fn commit(self) -> Result<(), Self::Error> {
+        self.0.commit().map_err(ErrorImpl)
+    }
+
+    fn abort(self) {
+        self.0.abort()
+    }
+}
+
+impl<'env> BackendRwCursorTransaction<'env> for RwTransactionImpl<'env> {
+    type RoCursor = RoCursorImpl<'env>;
+
+    fn open_ro_cursor(&'env self, db: &Self::Database) -> Result<Self::RoCursor, Self::Error> {
+        self.0.open_ro_cursor(db.0).map(RoCursorImpl).map_err(ErrorImpl)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_safe.rs
@@ -0,0 +1,43 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+mod cursor;
+mod database;
+mod environment;
+mod error;
+mod flags;
+mod info;
+mod iter;
+mod snapshot;
+mod stat;
+mod transaction;
+
+pub use cursor::{
+    RoCursorImpl,
+    RwCursorImpl,
+};
+pub use database::DatabaseId;
+pub use environment::{
+    EnvironmentBuilderImpl,
+    EnvironmentImpl,
+};
+pub use error::ErrorImpl;
+pub use flags::{
+    DatabaseFlagsImpl,
+    EnvironmentFlagsImpl,
+    WriteFlagsImpl,
+};
+pub use info::InfoImpl;
+pub use iter::IterImpl;
+pub use stat::StatImpl;
+pub use transaction::{
+    RoTransactionImpl,
+    RwTransactionImpl,
+};
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_safe/cursor.rs
@@ -0,0 +1,69 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use super::{
+    snapshot::Snapshot,
+    IterImpl,
+};
+use crate::backend::traits::BackendRoCursor;
+
+/// Read-only cursor for the safe-mode backend: borrows the transaction's
+/// `Snapshot` and iterates over its contents.
+#[derive(Debug)]
+pub struct RoCursorImpl<'env>(pub(crate) &'env Snapshot);
+
+impl<'env> BackendRoCursor<'env> for RoCursorImpl<'env> {
+    type Iter = IterImpl<'env>;
+
+    fn into_iter(self) -> Self::Iter {
+        IterImpl(Box::new(self.0.iter()))
+    }
+
+    // Skips pairs whose key sorts before `key`.
+    // NOTE(review): this assumes `Snapshot::iter` yields keys in sorted
+    // order; otherwise `skip_while` would stop at the first key >= `key`
+    // rather than selecting all of them — confirm against `snapshot.rs`.
+    fn into_iter_from<K>(self, key: K) -> Self::Iter
+    where
+        K: AsRef<[u8]>,
+    {
+        // FIXME: Don't allocate.
+        let key = key.as_ref().to_vec();
+        IterImpl(Box::new(self.0.iter().skip_while(move |&(k, _)| k < key.as_slice())))
+    }
+
+    // Yields only the pairs whose key equals `key` (duplicate values).
+    fn into_iter_dup_of<K>(self, key: K) -> Self::Iter
+    where
+        K: AsRef<[u8]>,
+    {
+        // FIXME: Don't allocate.
+        let key = key.as_ref().to_vec();
+        IterImpl(Box::new(self.0.iter().filter(move |&(k, _)| k == key.as_slice())))
+    }
+}
+
+/// Read-write cursor for the safe-mode backend. Currently a placeholder:
+/// every iteration method is `unimplemented!` and will panic if called.
+#[derive(Debug)]
+pub struct RwCursorImpl<'env>(&'env mut Snapshot);
+
+impl<'env> BackendRoCursor<'env> for RwCursorImpl<'env> {
+    type Iter = IterImpl<'env>;
+
+    fn into_iter(self) -> Self::Iter {
+        unimplemented!()
+    }
+
+    fn into_iter_from<K>(self, _key: K) -> Self::Iter
+    where
+        K: AsRef<[u8]>,
+    {
+        unimplemented!()
+    }
+
+    fn into_iter_dup_of<K>(self, _key: K) -> Self::Iter
+    where
+        K: AsRef<[u8]>,
+    {
+        unimplemented!()
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_safe/database.rs
@@ -0,0 +1,44 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use id_arena::Id;
+use serde_derive::{
+    Deserialize,
+    Serialize,
+};
+
+use super::snapshot::Snapshot;
+use super::DatabaseFlagsImpl;
+use crate::backend::traits::BackendDatabase;
+
+// Databases are addressed by arena id; the id (not the payload) is the
+// copyable handle handed out to callers.
+pub type DatabaseId = Id<DatabaseImpl>;
+
+impl BackendDatabase for DatabaseId {}
+
+/// A safe-mode database: its entire contents live in a serializable
+/// `Snapshot`, so persisting the environment is just serializing these.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct DatabaseImpl {
+    snapshot: Snapshot,
+}
+
+impl DatabaseImpl {
+    // Creates a database from an existing snapshot, or a fresh (empty)
+    // snapshot built with `flags` when none is supplied.
+    pub(crate) fn new(flags: Option<DatabaseFlagsImpl>, snapshot: Option<Snapshot>) -> DatabaseImpl {
+        DatabaseImpl {
+            snapshot: snapshot.unwrap_or_else(|| Snapshot::new(flags)),
+        }
+    }
+
+    // Clones the current snapshot (e.g. for a transaction's working copy).
+    pub(crate) fn snapshot(&self) -> Snapshot {
+        self.snapshot.clone()
+    }
+
+    // Installs `snapshot` as the current contents, returning the old one.
+    pub(crate) fn replace(&mut self, snapshot: Snapshot) -> Snapshot {
+        std::mem::replace(&mut self.snapshot, snapshot)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_safe/environment.rs
@@ -0,0 +1,251 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use std::borrow::Cow;
+use std::collections::HashMap;
+use std::fs;
+use std::path::{
+    Path,
+    PathBuf,
+};
+use std::sync::Arc;
+use std::sync::{
+    RwLock,
+    RwLockReadGuard,
+    RwLockWriteGuard,
+};
+
+use id_arena::Arena;
+use log::warn;
+
+use super::{
+    database::DatabaseImpl,
+    DatabaseFlagsImpl,
+    DatabaseId,
+    EnvironmentFlagsImpl,
+    ErrorImpl,
+    InfoImpl,
+    RoTransactionImpl,
+    RwTransactionImpl,
+    StatImpl,
+};
+use crate::backend::traits::{
+    BackendEnvironment,
+    BackendEnvironmentBuilder,
+};
+
+// Filename used when the environment path is a directory.
+const DEFAULT_DB_FILENAME: &str = "data.safe.bin";
+
+type DatabaseArena = Arena<DatabaseImpl>;
+// Maps database names (`None` = the unnamed/default database) to arena ids.
+type DatabaseNameMap = HashMap<Option<String>, DatabaseId>;
+
+/// Builder for a safe-mode environment. Settings are recorded here and
+/// passed to `EnvironmentImpl::new` on `open`; several are accepted for
+/// API parity with the LMDB backend but ignored (with a warning) there.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub struct EnvironmentBuilderImpl {
+    flags: EnvironmentFlagsImpl,
+    max_readers: Option<usize>,
+    max_dbs: Option<usize>,
+    map_size: Option<usize>,
+}
+
+impl<'env> BackendEnvironmentBuilder<'env> for EnvironmentBuilderImpl {
+    type Error = ErrorImpl;
+    type Environment = EnvironmentImpl;
+    type Flags = EnvironmentFlagsImpl;
+
+    fn new() -> EnvironmentBuilderImpl {
+        EnvironmentBuilderImpl {
+            flags: EnvironmentFlagsImpl::empty(),
+            max_readers: None,
+            max_dbs: None,
+            map_size: None,
+        }
+    }
+
+    fn set_flags<T>(&mut self, flags: T) -> &mut Self
+    where
+        T: Into<Self::Flags>,
+    {
+        self.flags = flags.into();
+        self
+    }
+
+    fn set_max_readers(&mut self, max_readers: u32) -> &mut Self {
+        self.max_readers = Some(max_readers as usize);
+        self
+    }
+
+    fn set_max_dbs(&mut self, max_dbs: u32) -> &mut Self {
+        self.max_dbs = Some(max_dbs as usize);
+        self
+    }
+
+    fn set_map_size(&mut self, map_size: usize) -> &mut Self {
+        self.map_size = Some(map_size);
+        self
+    }
+
+    // Creates the environment and immediately loads any previously
+    // persisted databases from disk.
+    fn open(&self, path: &Path) -> Result<Self::Environment, Self::Error> {
+        let mut env = EnvironmentImpl::new(path, self.flags, self.max_readers, self.max_dbs, self.map_size)?;
+        env.read_from_disk()?;
+        Ok(env)
+    }
+}
+
+/// Safe-mode environment: an in-memory arena of databases, persisted as a
+/// single bincode file (`DEFAULT_DB_FILENAME` when `path` is a directory).
+#[derive(Debug)]
+pub struct EnvironmentImpl {
+    path: PathBuf,
+    max_dbs: usize,
+    // `arena` owns the database payloads; `dbs` maps names to arena ids.
+    arena: RwLock<DatabaseArena>,
+    dbs: RwLock<DatabaseNameMap>,
+    // Cloned into each read/write transaction; `Arc::strong_count` is used
+    // to detect outstanding transactions (see `open_db`).
+    ro_txns: Arc<()>,
+    rw_txns: Arc<()>,
+}
+
+impl EnvironmentImpl {
+    // Serializes the name -> database map to bincode for `write_to_disk`.
+    fn serialize(&self) -> Result<Vec<u8>, ErrorImpl> {
+        let arena = self.arena.read().map_err(|_| ErrorImpl::DbPoisonError)?;
+        let dbs = self.dbs.read().map_err(|_| ErrorImpl::DbPoisonError)?;
+        let data: HashMap<_, _> = dbs.iter().map(|(name, id)| (name, &arena[*id])).collect();
+        Ok(bincode::serialize(&data)?)
+    }
+
+    // Rebuilds the arena and name map from bytes produced by `serialize`.
+    fn deserialize(bytes: &[u8]) -> Result<(DatabaseArena, DatabaseNameMap), ErrorImpl> {
+        let mut arena = DatabaseArena::new();
+        let mut dbs = HashMap::new();
+        let data: HashMap<_, _> = bincode::deserialize(&bytes)?;
+        for (name, db) in data {
+            dbs.insert(name, arena.alloc(db));
+        }
+        Ok((arena, dbs))
+    }
+}
+
+impl EnvironmentImpl {
+    // Builds an empty environment. Of the builder settings, only `max_dbs`
+    // is honored; `flags`, `max_readers`, and `map_size` are accepted for
+    // API parity with the LMDB backend and ignored with a warning.
+    pub(crate) fn new(
+        path: &Path,
+        flags: EnvironmentFlagsImpl,
+        max_readers: Option<usize>,
+        max_dbs: Option<usize>,
+        map_size: Option<usize>,
+    ) -> Result<EnvironmentImpl, ErrorImpl> {
+        if !flags.is_empty() {
+            warn!("Ignoring `flags={:?}`", flags);
+        }
+        if let Some(max_readers) = max_readers {
+            warn!("Ignoring `max_readers={}`", max_readers);
+        }
+        if let Some(map_size) = map_size {
+            warn!("Ignoring `map_size={}`", map_size);
+        }
+
+        Ok(EnvironmentImpl {
+            path: path.to_path_buf(),
+            max_dbs: max_dbs.unwrap_or(std::usize::MAX),
+            arena: RwLock::new(DatabaseArena::new()),
+            dbs: RwLock::new(HashMap::new()),
+            ro_txns: Arc::new(()),
+            rw_txns: Arc::new(()),
+        })
+    }
+
+    // Loads persisted databases. A directory path gets the default filename
+    // appended; a missing file is not an error (fresh environment).
+    pub(crate) fn read_from_disk(&mut self) -> Result<(), ErrorImpl> {
+        let mut path = Cow::from(&self.path);
+        if fs::metadata(&path)?.is_dir() {
+            path.to_mut().push(DEFAULT_DB_FILENAME);
+        };
+        if fs::metadata(&path).is_err() {
+            return Ok(());
+        };
+        let (arena, dbs) = Self::deserialize(&fs::read(&path)?)?;
+        self.arena = RwLock::new(arena);
+        self.dbs = RwLock::new(dbs);
+        Ok(())
+    }
+
+    // Persists all databases to disk (see `serialize`).
+    pub(crate) fn write_to_disk(&self) -> Result<(), ErrorImpl> {
+        let mut path = Cow::from(&self.path);
+        if fs::metadata(&path)?.is_dir() {
+            path.to_mut().push(DEFAULT_DB_FILENAME);
+        };
+        fs::write(&path, self.serialize()?)?;
+        Ok(())
+    }
+
+    // Shared access to the database arena; lock poisoning is surfaced as
+    // `DbPoisonError`.
+    pub(crate) fn dbs(&self) -> Result<RwLockReadGuard<DatabaseArena>, ErrorImpl> {
+        self.arena.read().map_err(|_| ErrorImpl::DbPoisonError)
+    }
+
+    // Exclusive access to the database arena.
+    pub(crate) fn dbs_mut(&self) -> Result<RwLockWriteGuard<DatabaseArena>, ErrorImpl> {
+        self.arena.write().map_err(|_| ErrorImpl::DbPoisonError)
+    }
+}
+
+impl<'env> BackendEnvironment<'env> for EnvironmentImpl {
+    type Error = ErrorImpl;
+    type Database = DatabaseId;
+    type Flags = DatabaseFlagsImpl;
+    type Stat = StatImpl;
+    type Info = InfoImpl;
+    type RoTransaction = RoTransactionImpl<'env>;
+    type RwTransaction = RwTransactionImpl<'env>;
+
+    // Looks up an existing database by name. Refuses to open while read
+    // transactions are outstanding: each one holds a clone of `ro_txns`
+    // (see `begin_ro_txn`), so a strong count above 1 means live readers.
+    fn open_db(&self, name: Option<&str>) -> Result<Self::Database, Self::Error> {
+        if Arc::strong_count(&self.ro_txns) > 1 {
+            return Err(ErrorImpl::DbsIllegalOpen);
+        }
+        // TODO: don't reallocate `name`.
+        let key = name.map(String::from);
+        let dbs = self.dbs.read().map_err(|_| ErrorImpl::DbPoisonError)?;
+        let id = dbs.get(&key).ok_or(ErrorImpl::DbNotFoundError)?;
+        Ok(*id)
+    }
+
+    // Creates (or returns) the database under `name`, enforcing `max_dbs`
+    // against the count of *named* databases (the unnamed `None` entry is
+    // excluded by the `filter_map`).
+    fn create_db(&self, name: Option<&str>, flags: Self::Flags) -> Result<Self::Database, Self::Error> {
+        // TODO: don't reallocate `name`.
+        let key = name.map(String::from);
+        let mut dbs = self.dbs.write().map_err(|_| ErrorImpl::DbPoisonError)?;
+        let mut arena = self.arena.write().map_err(|_| ErrorImpl::DbPoisonError)?;
+        if dbs.keys().filter_map(|k| k.as_ref()).count() >= self.max_dbs {
+            return Err(ErrorImpl::DbsFull);
+        }
+        let id = dbs.entry(key).or_insert_with(|| arena.alloc(DatabaseImpl::new(Some(flags), None)));
+        Ok(*id)
+    }
+
+    fn begin_ro_txn(&'env self) -> Result<Self::RoTransaction, Self::Error> {
+        RoTransactionImpl::new(self, self.ro_txns.clone())
+    }
+
+    fn begin_rw_txn(&'env self) -> Result<Self::RwTransaction, Self::Error> {
+        RwTransactionImpl::new(self, self.rw_txns.clone())
+    }
+
+    // Safe mode persists everything on sync; `force` has no meaning here.
+    fn sync(&self, force: bool) -> Result<(), Self::Error> {
+        warn!("Ignoring `force={}`", force);
+        self.write_to_disk()
+    }
+
+    // Safe mode keeps no LMDB-style statistics; these return unit-like
+    // placeholder types.
+    fn stat(&self) -> Result<Self::Stat, Self::Error> {
+        Ok(StatImpl)
+    }
+
+    fn info(&self) -> Result<Self::Info, Self::Error> {
+        Ok(InfoImpl)
+    }
+
+    fn freelist(&self) -> Result<usize, Self::Error> {
+        unimplemented!()
+    }
+
+    // Map size is meaningless for the in-memory backend; logged and ignored.
+    fn set_map_size(&self, size: usize) -> Result<(), Self::Error> {
+        warn!("Ignoring `set_map_size({})`", size);
+        Ok(())
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_safe/error.rs
@@ -0,0 +1,68 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use std::fmt;
+use std::io;
+
+use bincode::Error as BincodeError;
+
+use crate::backend::traits::BackendError;
+use crate::error::StoreError;
+
+/// Errors raised by the safe-mode backend.
+#[derive(Debug)]
+pub enum ErrorImpl {
+    /// The requested key was not present in the database.
+    KeyValuePairNotFound,
+    /// A lock guarding the environment's internal state was poisoned.
+    DbPoisonError,
+    /// `max_dbs` named databases have already been created.
+    DbsFull,
+    /// A database was opened while read transactions were outstanding.
+    DbsIllegalOpen,
+    /// No database with the given name has been created.
+    DbNotFoundError,
+    // NOTE(review): presumably a handle that belongs to a different
+    // environment — confirm where this variant is produced.
+    DbIsForeignError,
+    /// An underlying I/O failure while reading or writing the store.
+    IoError(io::Error),
+    /// (De)serialization of stored data failed.
+    BincodeError(BincodeError),
+}
+
+// Marker implementation tying `ErrorImpl` into the backend abstraction.
+impl BackendError for ErrorImpl {}
+
+/// Human-readable rendering: each unit variant prints under its own name,
+/// tagged "(safe mode)"; wrapped errors defer to their own `Display`.
+impl fmt::Display for ErrorImpl {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            ErrorImpl::KeyValuePairNotFound => write!(fmt, "KeyValuePairNotFound (safe mode)"),
+            ErrorImpl::DbPoisonError => write!(fmt, "DbPoisonError (safe mode)"),
+            ErrorImpl::DbsFull => write!(fmt, "DbsFull (safe mode)"),
+            // Fixed: previously printed "DbIllegalOpen", which did not match
+            // the variant name `DbsIllegalOpen`.
+            ErrorImpl::DbsIllegalOpen => write!(fmt, "DbsIllegalOpen (safe mode)"),
+            ErrorImpl::DbNotFoundError => write!(fmt, "DbNotFoundError (safe mode)"),
+            ErrorImpl::DbIsForeignError => write!(fmt, "DbIsForeignError (safe mode)"),
+            ErrorImpl::IoError(e) => e.fmt(fmt),
+            ErrorImpl::BincodeError(e) => e.fmt(fmt),
+        }
+    }
+}
+
+// Idiom: implement `From` rather than a hand-written `Into` — the standard
+// blanket impl still provides `Into<StoreError> for ErrorImpl` to existing
+// callers, and `From` additionally enables `?` conversions.
+impl From<ErrorImpl> for StoreError {
+    fn from(e: ErrorImpl) -> StoreError {
+        match e {
+            // Not-found is a first-class store error.
+            ErrorImpl::KeyValuePairNotFound => StoreError::KeyValuePairNotFound,
+            // Undecodable stored bytes mean the database itself is invalid.
+            ErrorImpl::BincodeError(_) => StoreError::DatabaseInvalid,
+            // Everything else is reported as a backend-specific error.
+            other => StoreError::SafeModeError(other),
+        }
+    }
+}
+
+impl From<io::Error> for ErrorImpl {
+    fn from(e: io::Error) -> ErrorImpl {
+        ErrorImpl::IoError(e)
+    }
+}
+
+impl From<BincodeError> for ErrorImpl {
+    fn from(e: BincodeError) -> ErrorImpl {
+        ErrorImpl::BincodeError(e)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_safe/flags.rs
@@ -0,0 +1,133 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use bitflags::bitflags;
+use serde_derive::{
+    Deserialize,
+    Serialize,
+};
+
+use crate::backend::common::{
+    DatabaseFlags,
+    EnvironmentFlags,
+    WriteFlags,
+};
+use crate::backend::traits::{
+    BackendDatabaseFlags,
+    BackendEnvironmentFlags,
+    BackendFlags,
+    BackendWriteFlags,
+};
+
+bitflags! {
+    // Environment flags for the safe-mode backend. No real flags are
+    // supported yet, so only the `NIL` placeholder bit exists. The derives
+    // presumably allow the flags to be stored with the on-disk data —
+    // NOTE(review): confirm against `write_to_disk` in env.rs.
+    #[derive(Default, Serialize, Deserialize)]
+    pub struct EnvironmentFlagsImpl: u32 {
+        const NIL = 0b0000_0000;
+    }
+}
+
+impl BackendFlags for EnvironmentFlagsImpl {
+    /// Returns the empty flag set.
+    fn empty() -> EnvironmentFlagsImpl {
+        // Resolves to the inherent `empty` generated by `bitflags!`
+        // (inherent items win over trait items), so this does not recurse.
+        EnvironmentFlagsImpl::empty()
+    }
+}
+
+impl BackendEnvironmentFlags for EnvironmentFlagsImpl {
+    /// Sets or clears the backend bits corresponding to `flag`.
+    fn set(&mut self, flag: EnvironmentFlags, value: bool) {
+        // Delegates to the inherent `set` generated by `bitflags!` (inherent
+        // methods shadow trait methods, so this is not self-recursive).
+        self.set(flag.into(), value)
+    }
+}
+
+// Idiom: implement `From` rather than a hand-written `Into`; the blanket
+// impl keeps `flag.into()` working at existing call sites. Every arm panics
+// because the safe-mode backend supports no environment flags yet.
+impl From<EnvironmentFlags> for EnvironmentFlagsImpl {
+    fn from(flags: EnvironmentFlags) -> EnvironmentFlagsImpl {
+        match flags {
+            EnvironmentFlags::FIXED_MAP => unimplemented!(),
+            EnvironmentFlags::NO_SUB_DIR => unimplemented!(),
+            EnvironmentFlags::WRITE_MAP => unimplemented!(),
+            EnvironmentFlags::READ_ONLY => unimplemented!(),
+            EnvironmentFlags::NO_META_SYNC => unimplemented!(),
+            EnvironmentFlags::NO_SYNC => unimplemented!(),
+            EnvironmentFlags::MAP_ASYNC => unimplemented!(),
+            EnvironmentFlags::NO_TLS => unimplemented!(),
+            EnvironmentFlags::NO_LOCK => unimplemented!(),
+            EnvironmentFlags::NO_READAHEAD => unimplemented!(),
+            EnvironmentFlags::NO_MEM_INIT => unimplemented!(),
+        }
+    }
+}
+
+bitflags! {
+    // Database flags for the safe-mode backend. Only duplicate-sort and
+    // integer-key support exist, each behind its cargo feature.
+    #[derive(Default, Serialize, Deserialize)]
+    pub struct DatabaseFlagsImpl: u32 {
+        const NIL = 0b0000_0000;
+        #[cfg(feature = "db-dup-sort")]
+        const DUP_SORT = 0b0000_0001;
+        #[cfg(feature = "db-int-key")]
+        const INTEGER_KEY = 0b0000_0010;
+    }
+}
+
+impl BackendFlags for DatabaseFlagsImpl {
+    /// Returns the empty flag set.
+    fn empty() -> DatabaseFlagsImpl {
+        // Inherent `bitflags!`-generated `empty`; not a recursive call.
+        DatabaseFlagsImpl::empty()
+    }
+}
+
+impl BackendDatabaseFlags for DatabaseFlagsImpl {
+    /// Sets or clears the backend bits corresponding to `flag`.
+    fn set(&mut self, flag: DatabaseFlags, value: bool) {
+        // Inherent `bitflags!`-generated `set`; not a recursive call.
+        self.set(flag.into(), value)
+    }
+}
+
+// Idiom: implement `From` rather than a hand-written `Into`; the blanket
+// impl keeps `flag.into()` working at existing call sites. Unsupported
+// flags panic; only DUP_SORT / INTEGER_KEY translate (behind features).
+impl From<DatabaseFlags> for DatabaseFlagsImpl {
+    fn from(flags: DatabaseFlags) -> DatabaseFlagsImpl {
+        match flags {
+            DatabaseFlags::REVERSE_KEY => unimplemented!(),
+            #[cfg(feature = "db-dup-sort")]
+            DatabaseFlags::DUP_SORT => DatabaseFlagsImpl::DUP_SORT,
+            #[cfg(feature = "db-int-key")]
+            DatabaseFlags::INTEGER_KEY => DatabaseFlagsImpl::INTEGER_KEY,
+            DatabaseFlags::DUP_FIXED => unimplemented!(),
+            DatabaseFlags::INTEGER_DUP => unimplemented!(),
+            DatabaseFlags::REVERSE_DUP => unimplemented!(),
+        }
+    }
+}
+
+bitflags! {
+    // Write flags for the safe-mode backend. No flags are supported yet,
+    // so only the `NIL` placeholder bit exists.
+    #[derive(Default, Serialize, Deserialize)]
+    pub struct WriteFlagsImpl: u32 {
+        const NIL = 0b0000_0000;
+    }
+}
+
+impl BackendFlags for WriteFlagsImpl {
+    /// Returns the empty flag set.
+    fn empty() -> WriteFlagsImpl {
+        // Inherent `bitflags!`-generated `empty`; not a recursive call.
+        WriteFlagsImpl::empty()
+    }
+}
+
+impl BackendWriteFlags for WriteFlagsImpl {
+    /// Sets or clears the backend bits corresponding to `flag`.
+    fn set(&mut self, flag: WriteFlags, value: bool) {
+        // Inherent `bitflags!`-generated `set`; not a recursive call.
+        self.set(flag.into(), value)
+    }
+}
+
+// Idiom: implement `From` rather than a hand-written `Into`; the blanket
+// impl keeps `flag.into()` working at existing call sites. Every arm panics
+// because the safe-mode backend supports no write flags yet.
+impl From<WriteFlags> for WriteFlagsImpl {
+    fn from(flags: WriteFlags) -> WriteFlagsImpl {
+        match flags {
+            WriteFlags::NO_OVERWRITE => unimplemented!(),
+            WriteFlags::NO_DUP_DATA => unimplemented!(),
+            WriteFlags::CURRENT => unimplemented!(),
+            WriteFlags::APPEND => unimplemented!(),
+            WriteFlags::APPEND_DUP => unimplemented!(),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rkv/src/backend/impl_safe/info.rs
@@ -0,0 +1,35 @@
+// Copyright 2018-2019 Mozilla
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+// this file except in compliance with the License. You may obtain a copy of the
+// License at http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+use crate::backend::traits::BackendInfo;
+
+/// Placeholder environment-info object for the safe-mode backend.
+pub struct InfoImpl;
+
+impl BackendInfo for InfoImpl {
+    fn map_size(&self) -> usize {
+        unimplemented!()
+    }
+
+    fn last_pgno(&self) -> usize {
+        unimplemented!()
+    }
+
+    fn last_