bug 1598286: third_party: vendor Rust dependencies; r=remote-protocol-reviewers,whimboo
authorAndreas Tolfsen <ato@sny.no>
Tue, 07 Jan 2020 13:28:57 +0000
changeset 509106 2965d6589d90cab06f93bb8b2a455eaf6eda0e8a
parent 509105 bca976fbc2c1a7dcde3a79c53bbad2cd31b923ee
child 509107 4558b24acb30bea7a8d0e9862a2e85e64ae65d4c
push id104443
push useratolfsen@mozilla.com
push dateTue, 07 Jan 2020 13:49:46 +0000
treeherderautoland@4558b24acb30 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersremote-protocol-reviewers, whimboo
bugs1598286
milestone74.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
bug 1598286: third_party: vendor Rust dependencies; r=remote-protocol-reviewers,whimboo Differential Revision: https://phabricator.services.mozilla.com/D58766
third_party/rust/bytes-0.4.9/.cargo-checksum.json
third_party/rust/bytes-0.4.9/CHANGELOG.md
third_party/rust/bytes-0.4.9/Cargo.toml
third_party/rust/bytes-0.4.9/LICENSE
third_party/rust/bytes-0.4.9/README.md
third_party/rust/bytes-0.4.9/benches/bytes.rs
third_party/rust/bytes-0.4.9/ci/before_deploy.ps1
third_party/rust/bytes-0.4.9/ci/before_deploy.sh
third_party/rust/bytes-0.4.9/ci/install.sh
third_party/rust/bytes-0.4.9/ci/script.sh
third_party/rust/bytes-0.4.9/ci/tsan
third_party/rust/bytes-0.4.9/src/buf/buf.rs
third_party/rust/bytes-0.4.9/src/buf/buf_mut.rs
third_party/rust/bytes-0.4.9/src/buf/chain.rs
third_party/rust/bytes-0.4.9/src/buf/from_buf.rs
third_party/rust/bytes-0.4.9/src/buf/into_buf.rs
third_party/rust/bytes-0.4.9/src/buf/iter.rs
third_party/rust/bytes-0.4.9/src/buf/mod.rs
third_party/rust/bytes-0.4.9/src/buf/reader.rs
third_party/rust/bytes-0.4.9/src/buf/take.rs
third_party/rust/bytes-0.4.9/src/buf/writer.rs
third_party/rust/bytes-0.4.9/src/bytes.rs
third_party/rust/bytes-0.4.9/src/debug.rs
third_party/rust/bytes-0.4.9/src/lib.rs
third_party/rust/bytes-0.4.9/src/serde.rs
third_party/rust/bytes-0.4.9/tests/test_buf.rs
third_party/rust/bytes-0.4.9/tests/test_buf_mut.rs
third_party/rust/bytes-0.4.9/tests/test_bytes.rs
third_party/rust/bytes-0.4.9/tests/test_chain.rs
third_party/rust/bytes-0.4.9/tests/test_debug.rs
third_party/rust/bytes-0.4.9/tests/test_from_buf.rs
third_party/rust/bytes-0.4.9/tests/test_iter.rs
third_party/rust/bytes-0.4.9/tests/test_serde.rs
third_party/rust/bytes-0.4.9/tests/test_take.rs
third_party/rust/bytes/.cargo-checksum.json
third_party/rust/bytes/CHANGELOG.md
third_party/rust/bytes/Cargo.toml
third_party/rust/bytes/README.md
third_party/rust/bytes/azure-pipelines.yml
third_party/rust/bytes/benches/buf.rs
third_party/rust/bytes/benches/bytes.rs
third_party/rust/bytes/benches/bytes_mut.rs
third_party/rust/bytes/ci/azure-cross-compile.yml
third_party/rust/bytes/ci/azure-deploy-docs.yml
third_party/rust/bytes/ci/azure-install-rust.yml
third_party/rust/bytes/ci/azure-loom.yml
third_party/rust/bytes/ci/azure-test-stable.yml
third_party/rust/bytes/ci/azure-tsan.yml
third_party/rust/bytes/ci/before_deploy.ps1
third_party/rust/bytes/ci/before_deploy.sh
third_party/rust/bytes/ci/install.sh
third_party/rust/bytes/ci/script.sh
third_party/rust/bytes/ci/tsan
third_party/rust/bytes/src/buf/buf.rs
third_party/rust/bytes/src/buf/buf_impl.rs
third_party/rust/bytes/src/buf/buf_mut.rs
third_party/rust/bytes/src/buf/chain.rs
third_party/rust/bytes/src/buf/ext/chain.rs
third_party/rust/bytes/src/buf/ext/limit.rs
third_party/rust/bytes/src/buf/ext/mod.rs
third_party/rust/bytes/src/buf/ext/reader.rs
third_party/rust/bytes/src/buf/ext/take.rs
third_party/rust/bytes/src/buf/ext/writer.rs
third_party/rust/bytes/src/buf/from_buf.rs
third_party/rust/bytes/src/buf/into_buf.rs
third_party/rust/bytes/src/buf/iter.rs
third_party/rust/bytes/src/buf/mod.rs
third_party/rust/bytes/src/buf/reader.rs
third_party/rust/bytes/src/buf/take.rs
third_party/rust/bytes/src/buf/vec_deque.rs
third_party/rust/bytes/src/buf/writer.rs
third_party/rust/bytes/src/bytes.rs
third_party/rust/bytes/src/bytes_mut.rs
third_party/rust/bytes/src/debug.rs
third_party/rust/bytes/src/hex.rs
third_party/rust/bytes/src/lib.rs
third_party/rust/bytes/src/loom.rs
third_party/rust/bytes/src/serde.rs
third_party/rust/bytes/tests/test_buf.rs
third_party/rust/bytes/tests/test_buf_mut.rs
third_party/rust/bytes/tests/test_bytes.rs
third_party/rust/bytes/tests/test_chain.rs
third_party/rust/bytes/tests/test_debug.rs
third_party/rust/bytes/tests/test_from_buf.rs
third_party/rust/bytes/tests/test_iter.rs
third_party/rust/bytes/tests/test_reader.rs
third_party/rust/bytes/tests/test_serde.rs
third_party/rust/bytes/tests/test_take.rs
third_party/rust/http-0.1.17/.cargo-checksum.json
third_party/rust/http-0.1.17/CHANGELOG.md
third_party/rust/http-0.1.17/Cargo.toml
third_party/rust/http-0.1.17/LICENSE-APACHE
third_party/rust/http-0.1.17/LICENSE-MIT
third_party/rust/http-0.1.17/README.md
third_party/rust/http-0.1.17/benches/header_map/basic.rs
third_party/rust/http-0.1.17/benches/header_map/mod.rs
third_party/rust/http-0.1.17/benches/header_map/vec_map.rs
third_party/rust/http-0.1.17/benches/header_value.rs
third_party/rust/http-0.1.17/benches/uri.rs
third_party/rust/http-0.1.17/src/byte_str.rs
third_party/rust/http-0.1.17/src/convert.rs
third_party/rust/http-0.1.17/src/error.rs
third_party/rust/http-0.1.17/src/extensions.rs
third_party/rust/http-0.1.17/src/header/map.rs
third_party/rust/http-0.1.17/src/header/mod.rs
third_party/rust/http-0.1.17/src/header/name.rs
third_party/rust/http-0.1.17/src/header/value.rs
third_party/rust/http-0.1.17/src/lib.rs
third_party/rust/http-0.1.17/src/method.rs
third_party/rust/http-0.1.17/src/request.rs
third_party/rust/http-0.1.17/src/response.rs
third_party/rust/http-0.1.17/src/status.rs
third_party/rust/http-0.1.17/src/uri/authority.rs
third_party/rust/http-0.1.17/src/uri/builder.rs
third_party/rust/http-0.1.17/src/uri/mod.rs
third_party/rust/http-0.1.17/src/uri/path.rs
third_party/rust/http-0.1.17/src/uri/port.rs
third_party/rust/http-0.1.17/src/uri/scheme.rs
third_party/rust/http-0.1.17/src/uri/tests.rs
third_party/rust/http-0.1.17/src/version.rs
third_party/rust/http-0.1.17/tests/header_map.rs
third_party/rust/http-0.1.17/tests/header_map_fuzz.rs
third_party/rust/http-0.1.17/tests/status_code.rs
third_party/rust/http/.cargo-checksum.json
third_party/rust/http/CHANGELOG.md
third_party/rust/http/Cargo.toml
third_party/rust/http/benches/header_map/basic.rs
third_party/rust/http/benches/header_map/mod.rs
third_party/rust/http/benches/header_map/vec_map.rs
third_party/rust/http/benches/header_name.rs
third_party/rust/http/benches/header_value.rs
third_party/rust/http/benches/uri.rs
third_party/rust/http/src/byte_str.rs
third_party/rust/http/src/convert.rs
third_party/rust/http/src/error.rs
third_party/rust/http/src/extensions.rs
third_party/rust/http/src/header/map.rs
third_party/rust/http/src/header/mod.rs
third_party/rust/http/src/header/name.rs
third_party/rust/http/src/header/value.rs
third_party/rust/http/src/lib.rs
third_party/rust/http/src/method.rs
third_party/rust/http/src/request.rs
third_party/rust/http/src/response.rs
third_party/rust/http/src/status.rs
third_party/rust/http/src/uri/authority.rs
third_party/rust/http/src/uri/builder.rs
third_party/rust/http/src/uri/mod.rs
third_party/rust/http/src/uri/path.rs
third_party/rust/http/src/uri/port.rs
third_party/rust/http/src/uri/scheme.rs
third_party/rust/http/src/uri/tests.rs
third_party/rust/http/src/version.rs
third_party/rust/http/tests/header_map.rs
third_party/rust/http/tests/header_map_fuzz.rs
third_party/rust/http/tests/status_code.rs
copy from third_party/rust/bytes/.cargo-checksum.json
copy to third_party/rust/bytes-0.4.9/.cargo-checksum.json
copy from third_party/rust/bytes/CHANGELOG.md
copy to third_party/rust/bytes-0.4.9/CHANGELOG.md
copy from third_party/rust/bytes/Cargo.toml
copy to third_party/rust/bytes-0.4.9/Cargo.toml
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes-0.4.9/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2018 Carl Lerche
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
copy from third_party/rust/bytes/README.md
copy to third_party/rust/bytes-0.4.9/README.md
copy from third_party/rust/bytes/benches/bytes.rs
copy to third_party/rust/bytes-0.4.9/benches/bytes.rs
rename from third_party/rust/bytes/ci/before_deploy.ps1
rename to third_party/rust/bytes-0.4.9/ci/before_deploy.ps1
rename from third_party/rust/bytes/ci/before_deploy.sh
rename to third_party/rust/bytes-0.4.9/ci/before_deploy.sh
rename from third_party/rust/bytes/ci/install.sh
rename to third_party/rust/bytes-0.4.9/ci/install.sh
rename from third_party/rust/bytes/ci/script.sh
rename to third_party/rust/bytes-0.4.9/ci/script.sh
copy from third_party/rust/bytes/ci/tsan
copy to third_party/rust/bytes-0.4.9/ci/tsan
rename from third_party/rust/bytes/src/buf/buf.rs
rename to third_party/rust/bytes-0.4.9/src/buf/buf.rs
copy from third_party/rust/bytes/src/buf/buf_mut.rs
copy to third_party/rust/bytes-0.4.9/src/buf/buf_mut.rs
rename from third_party/rust/bytes/src/buf/chain.rs
rename to third_party/rust/bytes-0.4.9/src/buf/chain.rs
rename from third_party/rust/bytes/src/buf/from_buf.rs
rename to third_party/rust/bytes-0.4.9/src/buf/from_buf.rs
rename from third_party/rust/bytes/src/buf/into_buf.rs
rename to third_party/rust/bytes-0.4.9/src/buf/into_buf.rs
copy from third_party/rust/bytes/src/buf/iter.rs
copy to third_party/rust/bytes-0.4.9/src/buf/iter.rs
copy from third_party/rust/bytes/src/buf/mod.rs
copy to third_party/rust/bytes-0.4.9/src/buf/mod.rs
rename from third_party/rust/bytes/src/buf/reader.rs
rename to third_party/rust/bytes-0.4.9/src/buf/reader.rs
rename from third_party/rust/bytes/src/buf/take.rs
rename to third_party/rust/bytes-0.4.9/src/buf/take.rs
rename from third_party/rust/bytes/src/buf/writer.rs
rename to third_party/rust/bytes-0.4.9/src/buf/writer.rs
copy from third_party/rust/bytes/src/bytes.rs
copy to third_party/rust/bytes-0.4.9/src/bytes.rs
copy from third_party/rust/bytes/src/debug.rs
copy to third_party/rust/bytes-0.4.9/src/debug.rs
copy from third_party/rust/bytes/src/lib.rs
copy to third_party/rust/bytes-0.4.9/src/lib.rs
copy from third_party/rust/bytes/src/serde.rs
copy to third_party/rust/bytes-0.4.9/src/serde.rs
copy from third_party/rust/bytes/tests/test_buf.rs
copy to third_party/rust/bytes-0.4.9/tests/test_buf.rs
copy from third_party/rust/bytes/tests/test_buf_mut.rs
copy to third_party/rust/bytes-0.4.9/tests/test_buf_mut.rs
copy from third_party/rust/bytes/tests/test_bytes.rs
copy to third_party/rust/bytes-0.4.9/tests/test_bytes.rs
copy from third_party/rust/bytes/tests/test_chain.rs
copy to third_party/rust/bytes-0.4.9/tests/test_chain.rs
copy from third_party/rust/bytes/tests/test_debug.rs
copy to third_party/rust/bytes-0.4.9/tests/test_debug.rs
rename from third_party/rust/bytes/tests/test_from_buf.rs
rename to third_party/rust/bytes-0.4.9/tests/test_from_buf.rs
copy from third_party/rust/bytes/tests/test_iter.rs
copy to third_party/rust/bytes-0.4.9/tests/test_iter.rs
copy from third_party/rust/bytes/tests/test_serde.rs
copy to third_party/rust/bytes-0.4.9/tests/test_serde.rs
copy from third_party/rust/bytes/tests/test_take.rs
copy to third_party/rust/bytes-0.4.9/tests/test_take.rs
--- a/third_party/rust/bytes/.cargo-checksum.json
+++ b/third_party/rust/bytes/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"CHANGELOG.md":"55941e30721c4b104cc8f84473da5acd0cd57903d66e8fd029b8c5160d99ed53","Cargo.toml":"f71e10b42ed8637ed615222f6d9e2af5df707f7f3d9d4fd203358c2af87b7ff0","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"3ca600d7b4175eee634621a870904fe5ec761e6fd623f745423d378dec1bfd51","benches/bytes.rs":"a60889c35cf76faf2b403f94d3ab2831a569f2e1f6e4cc4d5e88f3c26bddb8b0","ci/before_deploy.ps1":"a8ee0204dd1397a245a47626fecd98eff5da76e12b15139c06271b3cc309a3e1","ci/before_deploy.sh":"ea008e2c544482cba5b659c17887ccd5354779c629096f28e667d40391299cc5","ci/install.sh":"8b165fc99df296261fcc9cdcbc8b8a177c11c505cdc9255cc19efb66cb0055db","ci/script.sh":"4e6f6b7df02d316ce5166a3526dc6bca6b6d051dbc5bd6d5b28a7c79fc646834","ci/tsan":"905d22267f7493550d123b1482fc1a7f4b24e8cbc4ae4f0e0c2d42383e79ad83","src/buf/buf.rs":"1b5ff3ab694380fe59588b8d195111ba663c5f8901b272b531851deb26e4629a","src/buf/buf_mut.rs":"d2f54e9c64b86c8ddd325d40b3c8e1b2132d361937bac3b5fccb7a81154b89b8","src/buf/chain.rs":"3a4f88879d27240e84e58bbeddf3f7c0958d0d81f4707245199b53e922029a26","src/buf/from_buf.rs":"949683c6a08099b280bd324d0c8646b1d6ff80af4d3e9397edb76cc2f1b18c88","src/buf/into_buf.rs":"b6e35d34533fae229f5209b95a39a1c35485f48a873a1d357d99218c486b0b95","src/buf/iter.rs":"325428e4f913beb602f6451b59847d4c8658ec23939a15f7b145733969c17f03","src/buf/mod.rs":"4f385ce47d6d19a064a1dbec3339e95e116aa9b501eb9d8a47030c2794e1ee9e","src/buf/reader.rs":"62098e87bd1aa8b7f57ed4a4d1b5417462f01ad2cfebfbac46b6ce7f00ea0192","src/buf/take.rs":"0bdd0720afc546c999e5a3125f20b6f31a5692b37f7218c25f414773e2702f3d","src/buf/writer.rs":"4a28c1d362e837682a4b3197732a6dbb4072dc660f0dbba18616679adf8a60f2","src/bytes.rs":"546f2ef082656be2639314994d4228833f331747578a9ebf69075d2bcec0ae2d","src/debug.rs":"a8bd8062e7e500fdc5a79cb6c848fb860be8359d95e1c91034777fe33c78d54e","src/lib.rs":"fb61bba13236978f2c3b93cc39eb4a99c02f1ecd539c917a8380e5d344e67706","src/serde.rs":"e8d0fe3630e173272756fb24a8c3ccb112f4cb55
1b8b88b64f669a71f39ef83b","tests/test_buf.rs":"6409f32f734969bebeffa7592fed531953d252c5a639e422b6e4b14ec024b1d5","tests/test_buf_mut.rs":"a6a653d5053340b0254900c33e36df6db1421f821c3e985be0044b1b447ecedc","tests/test_bytes.rs":"92ae28671dee4ab91c7e0366e094b009c547defd8fd1c977520e5ad574eea70d","tests/test_chain.rs":"3fe1f28f3bce4377f8ed506718f95f3ed3ebaf251a1cb43b2705331e3dd6b43a","tests/test_debug.rs":"4cfd44c30d0b8f7c5eb8e8916ad7436e9f538732fe9f4b696dc22b84c31ac64a","tests/test_from_buf.rs":"9bf743c77e69c643d0a7673426547dacaedbcc65028a26cf5864eb6714e4897a","tests/test_iter.rs":"bc8a5da0b3cc7e5a5dc37e91dd2a3ca3fc78ba74b087883473043be45cd9b265","tests/test_serde.rs":"98e0ab121153a7ead47538257ac7fc7d5db081fc35050552b5e5dc9500b414f9","tests/test_take.rs":"bb81822eec5d3774bd2626f0f29b543d3651f4f5a95c51dfe8f93dec8b4f8e94"},"package":"e178b8e0e239e844b083d5a0d4a156b2654e67f9f80144d48398fcd736a24fb8"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"4faf2b723ed25868249363523d3506a939810e53877d7a68f72b705564e7200a","Cargo.toml":"52ab465c70fd369a72545d6fd12f5700edf6b741bfb26ef49076cd74770301a8","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"c2aac235762c99395ae437e5f561d135a06e390a98f74baf80fb4c71dfb91ece","azure-pipelines.yml":"80098a973fbec019ae6da61ffe075047371ebb574acf4e726379628449a77016","benches/buf.rs":"7cfbe40095c70dfc42ebe1ed2cb59c84b557a89c09e8925842efd76be226bd12","benches/bytes.rs":"dd7a4c89e1bb1d7490d0532e19c50634c118bfbfd32d5b1c189b4f96fcce381e","benches/bytes_mut.rs":"e2510665597135634c96fcb85e11519a8cf0363d51460d87821229cf1745b16b","ci/azure-cross-compile.yml":"93d711ef0d66262f762624f82deb0b61afd69637e9a6cfe38d18ad84cd09781d","ci/azure-deploy-docs.yml":"fce86e75cb8bc61aca7513cd6afa5ebe0fff8963beda7d6775e341945bec7eb2","ci/azure-install-rust.yml":"898f3dd92859375bdc14b7449a9da56860936d0e77e9de5d2505663d22abd95e","ci/azure-loom.yml":"c1e8782e855b27d26c022bcf2b34239ed5b0a7de2802de68fd7140566e175317","ci/azure-test-stable.yml":"e8a264a813f17b62db1ca1c7e34ba1842a87cdc5ad4a591c1643af0a8a4057f6","ci/azure-tsan.yml":"3996de625bf276ee16cc815809a3c312d5e9fe62424c38d2e1bc97614caf7df3","ci/tsan":"5194270c4e37b1a72e890c98eb2a4aae5f5506fb26a67af3d2834360d2e3d3c2","src/buf/buf_impl.rs":"d921c3171094f824bba4ec3bd69f07ce47257af257741a3cb0ff96887f5f5bd0","src/buf/buf_mut.rs":"289a9348aa2788e0cc12419d311c89c0e87a5e84d62bd8cd71f045924eb0349f","src/buf/ext/chain.rs":"d526cd39d870b7ae8c08e3bd2bc9e7770e9d014b9d9360246dd42c236b6400db","src/buf/ext/limit.rs":"99a42933ac6e309ee5b87818f9560ff041a3e388e8cef18b78ccfd00e3c5eec9","src/buf/ext/mod.rs":"aa2b370a4b44cd7c56ef7c5b07bdaf3723efe3cc465cef358df56433881503b3","src/buf/ext/reader.rs":"d48f07cb1ae0404a224162509fd356eb217b5f8ab020403467491445631616b1","src/buf/ext/take.rs":"fa1009c96175fc67a66f5a8d013140fed7cf0199fefe49bcd4ace82b7a82741b","src/buf/ext/writer.rs":"f01022d4589cee78e36c96032d01e68b6d559062d5
49e35132a3af869099a2d0","src/buf/iter.rs":"6de36052c0f428d912cea4055fd5c027038f70d369e881e42b6ada6aa9ea92c2","src/buf/mod.rs":"4f66903ca61fe88513c23664a4f33f26c00e4218fbc607e7f52981ba66b90456","src/buf/vec_deque.rs":"5a4063961d10380c1ab3681f8b3f6201112766d9f57a63e2861dc9f2b134668d","src/bytes.rs":"5af1de291faa0344fd7ebf6c1a5834f04aa9f9a7f1b405c8173c31819dd27ca2","src/bytes_mut.rs":"28af39ed6576df6be1c0e57d526ba4f7dd9d50d0d7b0767a3da54940f9fb3417","src/debug.rs":"0875de8307c223bce68e861bc78917e0ad7ef00d75966c0151a0b1aa83a6521a","src/hex.rs":"39c8ee531a45a25b8ef085b4279a9ba7f3b488e4d36c4f80d8769e04b1e51bfd","src/lib.rs":"7fedc5dee1f1d6968ccdccc84514003b1293a22a4b712b4557b49fa57d0752b2","src/loom.rs":"70263b3847d1e4960450a64cb34a87947eaa73755b45977d151265c13ebe4598","src/serde.rs":"c42e0644bed431852445433ac0d6e46f04891e40c046456350323dd3f7b8cf1c","tests/test_buf.rs":"dd3a83218bf5bcc277a8aa1c59c7ed6deeb7e752252b01bce5be4219e65a3e4f","tests/test_buf_mut.rs":"de50fcb03c984f299a84131829b72e351263541c592eec2c23e7ff4504c8e376","tests/test_bytes.rs":"a3b429df530ad90d450d236e893705b218b0319d61b074e9445715df56a15416","tests/test_chain.rs":"d3dab042b20b35e865af1101d78db002878a6604a0a4f4b7901bb1ee98f60684","tests/test_debug.rs":"5b425e056a32d0319d1857b54c88cf58952397bda6fee26b39c624d6c1444eee","tests/test_iter.rs":"95c531b984bcd9b60222b31558925f9662a38b409e731e4aaaafa904b1a64896","tests/test_reader.rs":"1b782d370c757dac14d59df1c4432a25fd8209cbe31b07fa4c380f5b82eec409","tests/test_serde.rs":"2cd4426bfa3a886745dd6958aab21c3493d1116b961acbbf35ec2866c2168a52","tests/test_take.rs":"998d16facf37fa0b2358e7aa42380279d4466d8dde2a3f8c1ae8a082bb37b180"},"package":"10004c15deb332055f7a4a208190aed362cf9a7c2f6ab70a305fba50e1105f38"}
\ No newline at end of file
--- a/third_party/rust/bytes/CHANGELOG.md
+++ b/third_party/rust/bytes/CHANGELOG.md
@@ -1,8 +1,77 @@
+# 0.5.3 (December 12, 2019)
+
+### Added
+- `must_use` attributes to `split`, `split_off`, and `split_to` methods (#337).
+
+### Fix
+- Potential freeing of a null pointer in `Bytes` when constructed with an empty `Vec<u8>` (#341, #342).
+- Calling `Bytes::truncate` with a size large than the length will no longer clear the `Bytes` (#333).
+
+# 0.5.2 (November 27, 2019)
+
+### Added
+- `Limit` methods `into_inner`, `get_ref`, `get_mut`, `limit`, and `set_limit` (#325).
+
+# 0.5.1 (November 25, 2019)
+
+### Fix
+- Growth documentation for `BytesMut` (#321)
+
+# 0.5.0 (November 25, 2019)
+
+### Fix
+- Potential overflow in `copy_to_slice`
+
+### Changed
+- Increased minimum supported Rust version to 1.39.
+- `Bytes` is now a "trait object", allowing for custom allocation strategies (#298)
+- `BytesMut` implicitly grows internal storage. `remaining_mut()` returns
+  `usize::MAX` (#316).
+- `BufMut::bytes_mut` returns `&mut [MaybeUninit<u8>]` to reflect the unknown
+  initialization state (#305).
+- `Buf` / `BufMut` implementations for `&[u8]` and `&mut [u8]`
+  respectively (#261).
+- Move `Buf` / `BufMut` "extra" functions to an extension trait (#306).
+- `BufMutExt::limit` (#309).
+- `Bytes::slice` takes a `RangeBounds` argument (#265).
+- `Bytes::from_static` is now a `const fn` (#311).
+- A multitude of smaller performance optimizations.
+
+### Added
+- `no_std` support (#281).
+- `get_*`, `put_*`, `get_*_le`, and `put_*le` accessors for handling byte order.
+- `BorrowMut` implementation for `BytesMut` (#185).
+
+### Removed
+- `IntoBuf` (#288).
+- `Buf` implementation for `&str` (#301).
+- `byteorder` dependency (#280).
+- `iovec` dependency, use `std::IoSlice` instead (#263).
+- optional `either` dependency (#315).
+- optional `i128` feature -- now available on stable. (#276).
+
+# 0.4.12 (March 6, 2019)
+
+### Added
+- Implement `FromIterator<&'a u8>` for `BytesMut`/`Bytes` (#244).
+- Implement `Buf` for `VecDeque` (#249).
+
+# 0.4.11 (November 17, 2018)
+
+* Use raw pointers for potentially racy loads (#233).
+* Implement `BufRead` for `buf::Reader` (#232).
+* Documentation tweaks (#234).
+
+# 0.4.10 (September 4, 2018)
+
+* impl `Buf` and `BufMut` for `Either` (#225).
+* Add `Bytes::slice_ref` (#208).
+
 # 0.4.9 (July 12, 2018)
 
 * Add 128 bit number support behind a feature flag (#209).
 * Implement `IntoBuf` for `&mut [u8]`
 
 # 0.4.8 (May 25, 2018)
 
 * Fix panic in `BytesMut` `FromIterator` implementation.
--- a/third_party/rust/bytes/Cargo.toml
+++ b/third_party/rust/bytes/Cargo.toml
@@ -1,41 +1,36 @@
 # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
 #
 # When uploading crates to the registry Cargo will automatically
 # "normalize" Cargo.toml files for maximal compatibility
 # with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g. crates.io) dependencies
+# to registry (e.g., crates.io) dependencies
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
+edition = "2018"
 name = "bytes"
-version = "0.4.9"
-authors = ["Carl Lerche <me@carllerche.com>"]
-exclude = [".gitignore", ".travis.yml", "deploy.sh", "bench/**/*", "test/**/*"]
+version = "0.5.3"
+authors = ["Carl Lerche <me@carllerche.com>", "Sean McArthur <sean@seanmonstar.com>"]
 description = "Types and traits for working with bytes"
-homepage = "https://github.com/carllerche/bytes"
-documentation = "https://carllerche.github.io/bytes/bytes"
+documentation = "https://docs.rs/bytes"
 readme = "README.md"
 keywords = ["buffers", "zero-copy", "io"]
 categories = ["network-programming", "data-structures"]
 license = "MIT"
-repository = "https://github.com/carllerche/bytes"
-[package.metadata.docs.rs]
-features = ["i128"]
-[dependencies.byteorder]
-version = "1.1.0"
-
-[dependencies.iovec]
-version = "0.1"
-
+repository = "https://github.com/tokio-rs/bytes"
 [dependencies.serde]
 version = "1.0"
 optional = true
+[dev-dependencies.loom]
+version = "0.2.10"
+
 [dev-dependencies.serde_test]
 version = "1.0"
 
 [features]
-i128 = ["byteorder/i128"]
+default = ["std"]
+std = []
--- a/third_party/rust/bytes/README.md
+++ b/third_party/rust/bytes/README.md
@@ -1,41 +1,44 @@
 # Bytes
 
 A utility library for working with bytes.
 
-[![Crates.io](https://img.shields.io/crates/v/bytes.svg?maxAge=2592000)](https://crates.io/crates/bytes)
-[![Build Status](https://travis-ci.org/carllerche/bytes.svg?branch=master)](https://travis-ci.org/carllerche/bytes)
+[![Crates.io][crates-badge]][crates-url]
+[![Build Status][azure-badge]][azure-url]
 
-[Documentation](https://carllerche.github.io/bytes/bytes/index.html)
+[crates-badge]: https://img.shields.io/crates/v/bytes.svg
+[crates-url]: https://crates.io/crates/bytes
+[azure-badge]: https://dev.azure.com/tokio-rs/bytes/_apis/build/status/tokio-rs.bytes?branchName=master
+[azure-url]: https://dev.azure.com/tokio-rs/bytes/_build/latest?definitionId=3&branchName=master
+
+[Documentation](https://docs.rs/bytes)
 
 ## Usage
 
 To use `bytes`, first add this to your `Cargo.toml`:
 
 ```toml
 [dependencies]
-bytes = "0.4"
+bytes = "0.5"
 ```
 
 Next, add this to your crate:
 
 ```rust
-extern crate bytes;
-
 use bytes::{Bytes, BytesMut, Buf, BufMut};
 ```
 
 ## Serde support
 
 Serde support is optional and disabled by default. To enable use the feature `serde`.
 
 ```toml
 [dependencies]
-bytes = { version = "0.4", features = ["serde"] }
+bytes = { version = "0.5", features = ["serde"] }
 ```
 
 ## License
 
 This project is licensed under the [MIT license](LICENSE).
 
 ### Contribution
 
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/azure-pipelines.yml
@@ -0,0 +1,68 @@
+trigger: ["master"]
+pr: ["master"]
+
+jobs:
+# Check formatting
+# - template: ci/azure-rustfmt.yml
+#   parameters:
+#     name: rustfmt
+
+# Apply clippy lints
+# - template: ci/azure-clippy.yml
+#   parameters:
+#     name: clippy
+
+# This represents the minimum Rust version supported by
+# Bytes. Updating this should be done in a dedicated PR.
+#
+# Tests are not run as tests may require newer versions of
+# rust.
+- template: ci/azure-test-stable.yml
+  parameters:
+    name: minrust
+    rust_version: 1.39.0
+    cmd: check
+
+# Stable
+- template: ci/azure-test-stable.yml
+  parameters:
+    name: stable
+    cross: true
+    features:
+      - serde
+
+# Nightly
+- template: ci/azure-test-stable.yml
+  parameters:
+    name: nightly
+    # Pin nightly to avoid being impacted by breakage
+    rust_version: nightly-2019-09-25
+    benches: true
+
+# Run tests on some extra platforms
+- template: ci/azure-cross-compile.yml
+  parameters:
+    name: cross
+
+# Sanitizers
+- template: ci/azure-tsan.yml
+  parameters:
+    name: tsan
+    rust_version: nightly
+
+# Loom
+- template: ci/azure-loom.yml
+  parameters:
+    name: loom
+    rust_version: stable
+
+
+- template: ci/azure-deploy-docs.yml
+  parameters:
+    dependsOn:
+      # - rustfmt
+      # - clippy
+      - stable
+      - nightly
+      - minrust
+      - cross
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/benches/buf.rs
@@ -0,0 +1,187 @@
+#![feature(test)]
+#![deny(warnings, rust_2018_idioms)]
+
+extern crate test;
+
+use test::Bencher;
+use bytes::Buf;
+
+/// Dummy Buf implementation
+struct TestBuf {
+    buf: &'static [u8],
+    readlens: &'static [usize],
+    init_pos: usize,
+    pos: usize,
+    readlen_pos: usize,
+    readlen: usize,
+}
+impl TestBuf {
+    fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBuf {
+        let mut buf = TestBuf {
+            buf,
+            readlens,
+            init_pos,
+            pos: 0,
+            readlen_pos: 0,
+            readlen: 0,
+        };
+        buf.reset();
+        buf
+    }
+    fn reset(&mut self) {
+        self.pos = self.init_pos;
+        self.readlen_pos = 0;
+        self.next_readlen();
+    }
+    /// Compute the length of the next read :
+    /// - use the next value specified in readlens (capped by remaining) if any
+    /// - else the remaining
+    fn next_readlen(&mut self) {
+        self.readlen = self.buf.len() - self.pos;
+        if let Some(readlen) = self.readlens.get(self.readlen_pos) {
+            self.readlen = std::cmp::min(self.readlen, *readlen);
+            self.readlen_pos += 1;
+        }
+    }
+}
+impl Buf for TestBuf {
+    fn remaining(&self) -> usize {
+        return self.buf.len() - self.pos;
+    }
+    fn advance(&mut self, cnt: usize) {
+        self.pos += cnt;
+        assert!(self.pos <= self.buf.len());
+        self.next_readlen();
+    }
+    fn bytes(&self) -> &[u8] {
+        if self.readlen == 0 {
+            Default::default()
+        } else {
+            &self.buf[self.pos..self.pos + self.readlen]
+        }
+    }
+}
+
+/// Dummy Buf implementation
+///  version with methods forced to not be inlined (to simulate costly calls)
+struct TestBufC {
+    inner: TestBuf,
+}
+impl TestBufC {
+    fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBufC {
+        TestBufC {
+            inner: TestBuf::new(buf, readlens, init_pos),
+        }
+    }
+    fn reset(&mut self) {
+        self.inner.reset()
+    }
+}
+impl Buf for TestBufC {
+    #[inline(never)]
+    fn remaining(&self) -> usize {
+        self.inner.remaining()
+    }
+    #[inline(never)]
+    fn advance(&mut self, cnt: usize) {
+        self.inner.advance(cnt)
+    }
+    #[inline(never)]
+    fn bytes(&self) -> &[u8] {
+        self.inner.bytes()
+    }
+}
+
+macro_rules! bench {
+    ($fname:ident, testbuf $testbuf:ident $readlens:expr, $method:ident $(,$arg:expr)*) => (
+        #[bench]
+        fn $fname(b: &mut Bencher) {
+            let mut bufs = [
+                $testbuf::new(&[1u8; 8+0], $readlens, 0),
+                $testbuf::new(&[1u8; 8+1], $readlens, 1),
+                $testbuf::new(&[1u8; 8+2], $readlens, 2),
+                $testbuf::new(&[1u8; 8+3], $readlens, 3),
+                $testbuf::new(&[1u8; 8+4], $readlens, 4),
+                $testbuf::new(&[1u8; 8+5], $readlens, 5),
+                $testbuf::new(&[1u8; 8+6], $readlens, 6),
+                $testbuf::new(&[1u8; 8+7], $readlens, 7),
+            ];
+            b.iter(|| {
+                for i in 0..8 {
+                    bufs[i].reset();
+                    let buf: &mut dyn Buf =  &mut bufs[i]; // type erasure
+                    test::black_box(buf.$method($($arg,)*));
+                }
+            })
+        }
+    );
+    ($fname:ident, slice, $method:ident $(,$arg:expr)*) => (
+        #[bench]
+        fn $fname(b: &mut Bencher) {
+            // buf must be long enough for one read of 8 bytes starting at pos 7
+            let arr = [1u8; 8+7];
+            b.iter(|| {
+                for i in 0..8 {
+                    let mut buf = &arr[i..];
+                    let buf = &mut buf as &mut dyn Buf; // type erasure
+                    test::black_box(buf.$method($($arg,)*));
+                }
+            })
+        }
+    );
+    ($fname:ident, option) => (
+        #[bench]
+        fn $fname(b: &mut Bencher) {
+            let data = [1u8; 1];
+            b.iter(|| {
+                for _ in 0..8 {
+                    let mut buf = Some(data);
+                    let buf = &mut buf as &mut dyn Buf; // type erasure
+                    test::black_box(buf.get_u8());
+                }
+            })
+        }
+    );
+}
+
+macro_rules! bench_group {
+    ($method:ident $(,$arg:expr)*) => (
+        bench!(slice, slice, $method $(,$arg)*);
+        bench!(tbuf_1,        testbuf TestBuf  &[],  $method $(,$arg)*);
+        bench!(tbuf_1_costly, testbuf TestBufC &[],  $method $(,$arg)*);
+        bench!(tbuf_2,        testbuf TestBuf  &[1], $method $(,$arg)*);
+        bench!(tbuf_2_costly, testbuf TestBufC &[1], $method $(,$arg)*);
+        // bench!(tbuf_onebyone,        testbuf TestBuf  &[1,1,1,1,1,1,1,1], $method $(,$arg)*);
+        // bench!(tbuf_onebyone_costly, testbuf TestBufC &[1,1,1,1,1,1,1,1], $method $(,$arg)*);
+    );
+}
+
+mod get_u8 {
+    use super::*;
+    bench_group!(get_u8);
+    bench!(option, option);
+}
+mod get_u16 {
+    use super::*;
+    bench_group!(get_u16);
+}
+mod get_u32 {
+    use super::*;
+    bench_group!(get_u32);
+}
+mod get_u64 {
+    use super::*;
+    bench_group!(get_u64);
+}
+mod get_f32 {
+    use super::*;
+    bench_group!(get_f32);
+}
+mod get_f64 {
+    use super::*;
+    bench_group!(get_f64);
+}
+mod get_uint24 {
+    use super::*;
+    bench_group!(get_uint, 3);
+}
--- a/third_party/rust/bytes/benches/bytes.rs
+++ b/third_party/rust/bytes/benches/bytes.rs
@@ -1,250 +1,118 @@
 #![feature(test)]
+#![deny(warnings, rust_2018_idioms)]
 
-extern crate bytes;
 extern crate test;
 
 use test::Bencher;
-use bytes::{Bytes, BytesMut, BufMut};
-
-#[bench]
-fn alloc_small(b: &mut Bencher) {
-    b.iter(|| {
-        for _ in 0..1024 {
-            test::black_box(BytesMut::with_capacity(12));
-        }
-    })
-}
-
-#[bench]
-fn alloc_mid(b: &mut Bencher) {
-    b.iter(|| {
-        test::black_box(BytesMut::with_capacity(128));
-    })
-}
-
-#[bench]
-fn alloc_big(b: &mut Bencher) {
-    b.iter(|| {
-        test::black_box(BytesMut::with_capacity(4096));
-    })
-}
-
-#[bench]
-fn split_off_and_drop(b: &mut Bencher) {
-    b.iter(|| {
-        for _ in 0..1024 {
-            let v = vec![10; 200];
-            let mut b = Bytes::from(v);
-            test::black_box(b.split_off(100));
-            test::black_box(b);
-        }
-    })
-}
+use bytes::Bytes;
 
 #[bench]
 fn deref_unique(b: &mut Bencher) {
-    let mut buf = BytesMut::with_capacity(4096);
-    buf.put(&[0u8; 1024][..]);
+    let buf = Bytes::from(vec![0; 1024]);
 
     b.iter(|| {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
     })
 }
 
 #[bench]
-fn deref_unique_unroll(b: &mut Bencher) {
-    let mut buf = BytesMut::with_capacity(4096);
-    buf.put(&[0u8; 1024][..]);
-
-    b.iter(|| {
-        for _ in 0..128 {
-            test::black_box(&buf[..]);
-            test::black_box(&buf[..]);
-            test::black_box(&buf[..]);
-            test::black_box(&buf[..]);
-            test::black_box(&buf[..]);
-            test::black_box(&buf[..]);
-            test::black_box(&buf[..]);
-            test::black_box(&buf[..]);
-        }
-    })
-}
-
-#[bench]
 fn deref_shared(b: &mut Bencher) {
-    let mut buf = BytesMut::with_capacity(4096);
-    buf.put(&[0u8; 1024][..]);
-    let _b2 = buf.split_off(1024);
+    let buf = Bytes::from(vec![0; 1024]);
+    let _b2 = buf.clone();
 
     b.iter(|| {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
     })
 }
 
 #[bench]
-fn deref_inline(b: &mut Bencher) {
-    let mut buf = BytesMut::with_capacity(8);
-    buf.put(&[0u8; 8][..]);
+fn deref_static(b: &mut Bencher) {
+    let buf = Bytes::from_static(b"hello world");
 
     b.iter(|| {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
     })
 }
 
 #[bench]
-fn deref_two(b: &mut Bencher) {
-    let mut buf1 = BytesMut::with_capacity(8);
-    buf1.put(&[0u8; 8][..]);
-
-    let mut buf2 = BytesMut::with_capacity(4096);
-    buf2.put(&[0u8; 1024][..]);
-
-    b.iter(|| {
-        for _ in 0..512 {
-            test::black_box(&buf1[..]);
-            test::black_box(&buf2[..]);
-        }
-    })
-}
-
-#[bench]
-fn clone_inline(b: &mut Bencher) {
-    let bytes = Bytes::from_static(b"hello world");
-
-    b.iter(|| {
-        for _ in 0..1024 {
-            test::black_box(&bytes.clone());
-        }
-    })
-}
-
-#[bench]
 fn clone_static(b: &mut Bencher) {
     let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes());
 
     b.iter(|| {
         for _ in 0..1024 {
             test::black_box(&bytes.clone());
         }
     })
 }
 
 #[bench]
-fn clone_arc(b: &mut Bencher) {
-    let bytes = Bytes::from("hello world 1234567890 and have a good byte 0987654321".as_bytes());
+fn clone_shared(b: &mut Bencher) {
+    let bytes = Bytes::from(b"hello world 1234567890 and have a good byte 0987654321".to_vec());
 
     b.iter(|| {
         for _ in 0..1024 {
             test::black_box(&bytes.clone());
         }
     })
 }
 
 #[bench]
-fn alloc_write_split_to_mid(b: &mut Bencher) {
-    b.iter(|| {
-        let mut buf = BytesMut::with_capacity(128);
-        buf.put_slice(&[0u8; 64]);
-        test::black_box(buf.split_to(64));
-    })
-}
-
-#[bench]
-fn drain_write_drain(b: &mut Bencher) {
-    let data = [0u8; 128];
+fn clone_arc_vec(b: &mut Bencher) {
+    use std::sync::Arc;
+    let bytes = Arc::new(b"hello world 1234567890 and have a good byte 0987654321".to_vec());
 
     b.iter(|| {
-        let mut buf = BytesMut::with_capacity(1024);
-        let mut parts = Vec::with_capacity(8);
-
-        for _ in 0..8 {
-            buf.put(&data[..]);
-            parts.push(buf.split_to(128));
+        for _ in 0..1024 {
+            test::black_box(&bytes.clone());
         }
-
-        test::black_box(parts);
-    })
-}
-
-#[bench]
-fn fmt_write(b: &mut Bencher) {
-    use std::fmt::Write;
-    let mut buf = BytesMut::with_capacity(128);
-    let s = "foo bar baz quux lorem ipsum dolor et";
-
-    b.bytes = s.len() as u64;
-    b.iter(|| {
-        let _ = write!(buf, "{}", s);
-        test::black_box(&buf);
-        unsafe { buf.set_len(0); }
     })
 }
 
 #[bench]
 fn from_long_slice(b: &mut Bencher) {
     let data = [0u8; 128];
     b.bytes = data.len() as u64;
     b.iter(|| {
-        let buf = BytesMut::from(&data[..]);
+        let buf = Bytes::copy_from_slice(&data[..]);
         test::black_box(buf);
     })
 }
 
 #[bench]
 fn slice_empty(b: &mut Bencher) {
     b.iter(|| {
         let b = Bytes::from(vec![17; 1024]).clone();
         for i in 0..1000 {
-            test::black_box(b.slice(i % 100, i % 100));
+            test::black_box(b.slice(i % 100..i % 100));
         }
     })
 }
 
 #[bench]
 fn slice_short_from_arc(b: &mut Bencher) {
     b.iter(|| {
         // `clone` is to convert to ARC
         let b = Bytes::from(vec![17; 1024]).clone();
         for i in 0..1000 {
-            test::black_box(b.slice(1, 2 + i % 10));
-        }
-    })
-}
-
-// Keep in sync with bytes.rs
-#[cfg(target_pointer_width = "64")]
-const INLINE_CAP: usize = 4 * 8 - 1;
-#[cfg(target_pointer_width = "32")]
-const INLINE_CAP: usize = 4 * 4 - 1;
-
-#[bench]
-fn slice_avg_le_inline_from_arc(b: &mut Bencher) {
-    b.iter(|| {
-        // `clone` is to convert to ARC
-        let b = Bytes::from(vec![17; 1024]).clone();
-        for i in 0..1000 {
-            // [1, INLINE_CAP]
-            let len = 1 + i % (INLINE_CAP - 1);
-            test::black_box(b.slice(i % 10, i % 10 + len));
+            test::black_box(b.slice(1..2 + i % 10));
         }
     })
 }
 
 #[bench]
-fn slice_large_le_inline_from_arc(b: &mut Bencher) {
+fn split_off_and_drop(b: &mut Bencher) {
     b.iter(|| {
-        // `clone` is to convert to ARC
-        let b = Bytes::from(vec![17; 1024]).clone();
-        for i in 0..1000 {
-            // [INLINE_CAP - 10, INLINE_CAP]
-            let len = INLINE_CAP - 9 + i % 10;
-            test::black_box(b.slice(i % 10, i % 10 + len));
+        for _ in 0..1024 {
+            let v = vec![10; 200];
+            let mut b = Bytes::from(v);
+            test::black_box(b.split_off(100));
+            test::black_box(b);
         }
     })
 }
copy from third_party/rust/bytes/benches/bytes.rs
copy to third_party/rust/bytes/benches/bytes_mut.rs
--- a/third_party/rust/bytes/benches/bytes.rs
+++ b/third_party/rust/bytes/benches/bytes_mut.rs
@@ -1,15 +1,15 @@
 #![feature(test)]
+#![deny(warnings, rust_2018_idioms)]
 
-extern crate bytes;
 extern crate test;
 
 use test::Bencher;
-use bytes::{Bytes, BytesMut, BufMut};
+use bytes::{BufMut, BytesMut};
 
 #[bench]
 fn alloc_small(b: &mut Bencher) {
     b.iter(|| {
         for _ in 0..1024 {
             test::black_box(BytesMut::with_capacity(12));
         }
     })
@@ -24,27 +24,16 @@ fn alloc_mid(b: &mut Bencher) {
 
 #[bench]
 fn alloc_big(b: &mut Bencher) {
     b.iter(|| {
         test::black_box(BytesMut::with_capacity(4096));
     })
 }
 
-#[bench]
-fn split_off_and_drop(b: &mut Bencher) {
-    b.iter(|| {
-        for _ in 0..1024 {
-            let v = vec![10; 200];
-            let mut b = Bytes::from(v);
-            test::black_box(b.split_off(100));
-            test::black_box(b);
-        }
-    })
-}
 
 #[bench]
 fn deref_unique(b: &mut Bencher) {
     let mut buf = BytesMut::with_capacity(4096);
     buf.put(&[0u8; 1024][..]);
 
     b.iter(|| {
         for _ in 0..1024 {
@@ -81,68 +70,34 @@ fn deref_shared(b: &mut Bencher) {
     b.iter(|| {
         for _ in 0..1024 {
             test::black_box(&buf[..]);
         }
     })
 }
 
 #[bench]
-fn deref_inline(b: &mut Bencher) {
-    let mut buf = BytesMut::with_capacity(8);
-    buf.put(&[0u8; 8][..]);
-
-    b.iter(|| {
-        for _ in 0..1024 {
-            test::black_box(&buf[..]);
-        }
-    })
-}
-
-#[bench]
 fn deref_two(b: &mut Bencher) {
     let mut buf1 = BytesMut::with_capacity(8);
     buf1.put(&[0u8; 8][..]);
 
     let mut buf2 = BytesMut::with_capacity(4096);
     buf2.put(&[0u8; 1024][..]);
 
     b.iter(|| {
         for _ in 0..512 {
             test::black_box(&buf1[..]);
             test::black_box(&buf2[..]);
         }
     })
 }
 
 #[bench]
-fn clone_inline(b: &mut Bencher) {
-    let bytes = Bytes::from_static(b"hello world");
-
-    b.iter(|| {
-        for _ in 0..1024 {
-            test::black_box(&bytes.clone());
-        }
-    })
-}
-
-#[bench]
-fn clone_static(b: &mut Bencher) {
-    let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes());
-
-    b.iter(|| {
-        for _ in 0..1024 {
-            test::black_box(&bytes.clone());
-        }
-    })
-}
-
-#[bench]
-fn clone_arc(b: &mut Bencher) {
-    let bytes = Bytes::from("hello world 1234567890 and have a good byte 0987654321".as_bytes());
+fn clone_frozen(b: &mut Bencher) {
+    let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..]).split().freeze();
 
     b.iter(|| {
         for _ in 0..1024 {
             test::black_box(&bytes.clone());
         }
     })
 }
 
@@ -182,69 +137,113 @@ fn fmt_write(b: &mut Bencher) {
     b.iter(|| {
         let _ = write!(buf, "{}", s);
         test::black_box(&buf);
         unsafe { buf.set_len(0); }
     })
 }
 
 #[bench]
-fn from_long_slice(b: &mut Bencher) {
-    let data = [0u8; 128];
-    b.bytes = data.len() as u64;
+fn bytes_mut_extend(b: &mut Bencher) {
+    let mut buf = BytesMut::with_capacity(256);
+    let data = [33u8; 32];
+
+    b.bytes = data.len() as u64 * 4;
     b.iter(|| {
-        let buf = BytesMut::from(&data[..]);
-        test::black_box(buf);
-    })
+        for _ in 0..4 {
+            buf.extend(&data);
+        }
+        test::black_box(&buf);
+        unsafe { buf.set_len(0); }
+    });
+}
+
+// BufMut for BytesMut vs Vec<u8>
+
+#[bench]
+fn put_slice_bytes_mut(b: &mut Bencher) {
+    let mut buf = BytesMut::with_capacity(256);
+    let data = [33u8; 32];
+
+    b.bytes = data.len() as u64 * 4;
+    b.iter(|| {
+        for _ in 0..4 {
+            buf.put_slice(&data);
+        }
+        test::black_box(&buf);
+        unsafe { buf.set_len(0); }
+    });
 }
 
 #[bench]
-fn slice_empty(b: &mut Bencher) {
+fn put_u8_bytes_mut(b: &mut Bencher) {
+    let mut buf = BytesMut::with_capacity(256);
+    let cnt = 128;
+
+    b.bytes = cnt as u64;
     b.iter(|| {
-        let b = Bytes::from(vec![17; 1024]).clone();
-        for i in 0..1000 {
-            test::black_box(b.slice(i % 100, i % 100));
+        for _ in 0..cnt {
+            buf.put_u8(b'x');
         }
-    })
+        test::black_box(&buf);
+        unsafe { buf.set_len(0); }
+    });
 }
 
 #[bench]
-fn slice_short_from_arc(b: &mut Bencher) {
-    b.iter(|| {
-        // `clone` is to convert to ARC
-        let b = Bytes::from(vec![17; 1024]).clone();
-        for i in 0..1000 {
-            test::black_box(b.slice(1, 2 + i % 10));
-        }
-    })
-}
+fn put_slice_vec(b: &mut Bencher) {
+    let mut buf = Vec::<u8>::with_capacity(256);
+    let data = [33u8; 32];
 
-// Keep in sync with bytes.rs
-#[cfg(target_pointer_width = "64")]
-const INLINE_CAP: usize = 4 * 8 - 1;
-#[cfg(target_pointer_width = "32")]
-const INLINE_CAP: usize = 4 * 4 - 1;
-
-#[bench]
-fn slice_avg_le_inline_from_arc(b: &mut Bencher) {
+    b.bytes = data.len() as u64 * 4;
     b.iter(|| {
-        // `clone` is to convert to ARC
-        let b = Bytes::from(vec![17; 1024]).clone();
-        for i in 0..1000 {
-            // [1, INLINE_CAP]
-            let len = 1 + i % (INLINE_CAP - 1);
-            test::black_box(b.slice(i % 10, i % 10 + len));
+        for _ in 0..4 {
+            buf.put_slice(&data);
         }
-    })
+        test::black_box(&buf);
+        unsafe { buf.set_len(0); }
+    });
 }
 
 #[bench]
-fn slice_large_le_inline_from_arc(b: &mut Bencher) {
+fn put_u8_vec(b: &mut Bencher) {
+    let mut buf = Vec::<u8>::with_capacity(256);
+    let cnt = 128;
+
+    b.bytes = cnt as u64;
+    b.iter(|| {
+        for _ in 0..cnt {
+            buf.put_u8(b'x');
+        }
+        test::black_box(&buf);
+        unsafe { buf.set_len(0); }
+    });
+}
+
+#[bench]
+fn put_slice_vec_extend(b: &mut Bencher) {
+    let mut buf = Vec::<u8>::with_capacity(256);
+    let data = [33u8; 32];
+
+    b.bytes = data.len() as u64 * 4;
     b.iter(|| {
-        // `clone` is to convert to ARC
-        let b = Bytes::from(vec![17; 1024]).clone();
-        for i in 0..1000 {
-            // [INLINE_CAP - 10, INLINE_CAP]
-            let len = INLINE_CAP - 9 + i % 10;
-            test::black_box(b.slice(i % 10, i % 10 + len));
+        for _ in 0..4 {
+            buf.extend_from_slice(&data);
         }
-    })
+        test::black_box(&buf);
+        unsafe { buf.set_len(0); }
+    });
 }
+
+#[bench]
+fn put_u8_vec_push(b: &mut Bencher) {
+    let mut buf = Vec::<u8>::with_capacity(256);
+    let cnt = 128;
+
+    b.bytes = cnt as u64;
+    b.iter(|| {
+        for _ in 0..cnt {
+            buf.push(b'x');
+        }
+        test::black_box(&buf);
+        unsafe { buf.set_len(0); }
+    });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/ci/azure-cross-compile.yml
@@ -0,0 +1,46 @@
+parameters:
+  cmd: build
+  rust_version: stable
+
+jobs:
+- job: ${{ parameters.name }}
+  displayName: Cross
+  strategy:
+    matrix:
+      i686:
+        vmImage: ubuntu-16.04
+        target: i686-unknown-linux-gnu
+      armv7:
+        vmImage: ubuntu-16.04
+        target: armv7-unknown-linux-gnueabihf
+      powerpc:
+        vmImage: ubuntu-16.04
+        target: powerpc-unknown-linux-gnu
+      powerpc64:
+        vmImage: ubuntu-16.04
+        target: powerpc64-unknown-linux-gnu
+      wasm:
+        vmImage: ubuntu-16.04
+        target: wasm32-unknown-unknown
+  pool:
+    vmImage: $(vmImage)
+
+  steps:
+    - template: azure-install-rust.yml
+      parameters:
+        rust_version: ${{parameters.rust_version}}
+
+    - script: cargo install cross
+      displayName: Install cross
+      condition: not(eq(variables['target'], 'wasm32-unknown-unknown'))
+
+    - script: cross ${{ parameters.cmd }} --target $(target)
+      displayName: cross ${{ parameters.cmd }} --target $(target)
+      condition: not(eq(variables['target'], 'wasm32-unknown-unknown'))
+
+    # WASM support
+    - script: |
+        rustup target add $(target)
+        cargo build --target $(target)
+      displayName: cargo build --target $(target)
+      condition: eq(variables['target'], 'wasm32-unknown-unknown')
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/ci/azure-deploy-docs.yml
@@ -0,0 +1,39 @@
+parameters:
+  dependsOn: []
+
+jobs:
+- job: documentation
+  displayName: 'Deploy API Documentation'
+  condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master'))
+  pool:
+    vmImage: 'Ubuntu 16.04'
+  dependsOn:
+    - ${{ parameters.dependsOn }}
+  steps:
+  - template: azure-install-rust.yml
+    parameters:
+      rust_version: stable
+  - script: |
+      cargo doc --no-deps
+      cp -R target/doc '$(Build.BinariesDirectory)'
+    displayName: 'Generate Documentation'
+  - script: |
+      set -e
+
+      git --version
+      ls -la
+      git init
+      git config user.name 'Deployment Bot (from Azure Pipelines)'
+      git config user.email 'deploy@tokio-rs.com'
+      git config --global credential.helper 'store --file ~/.my-credentials'
+      printf "protocol=https\nhost=github.com\nusername=carllerche\npassword=%s\n\n" "$GITHUB_TOKEN" | git credential-store --file ~/.my-credentials store
+      git remote add origin https://github.com/tokio-rs/bytes
+      git checkout -b gh-pages
+      git add .
+      git commit -m 'Deploy Bytes API documentation'
+      git push -f origin gh-pages
+    env:
+      GITHUB_TOKEN: $(githubPersonalToken)
+    workingDirectory: '$(Build.BinariesDirectory)'
+    displayName: 'Deploy Documentation'
+
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/ci/azure-install-rust.yml
@@ -0,0 +1,33 @@
+steps:
+  # Linux and macOS.
+  - script: |
+      set -e
+      curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain none
+      export PATH=$PATH:$HOME/.cargo/bin
+      rustup toolchain install $RUSTUP_TOOLCHAIN
+      rustup default $RUSTUP_TOOLCHAIN
+      echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin"
+    env:
+      RUSTUP_TOOLCHAIN: ${{parameters.rust_version}}
+    displayName: "Install rust (*nix)"
+    condition: not(eq(variables['Agent.OS'], 'Windows_NT'))
+
+  # Windows.
+  - script: |
+      curl -sSf -o rustup-init.exe https://win.rustup.rs
+      rustup-init.exe -y --default-toolchain none
+      set PATH=%PATH%;%USERPROFILE%\.cargo\bin
+      rustup toolchain install %RUSTUP_TOOLCHAIN%
+      rustup default %RUSTUP_TOOLCHAIN%
+      echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin"
+    env:
+      RUSTUP_TOOLCHAIN: ${{parameters.rust_version}}
+    displayName: "Install rust (windows)"
+    condition: eq(variables['Agent.OS'], 'Windows_NT')
+
+  # All platforms.
+  - script: |
+        rustup toolchain list
+        rustc -Vv
+        cargo -V
+    displayName: Query rust and cargo versions
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/ci/azure-loom.yml
@@ -0,0 +1,15 @@
+jobs:
+- job: ${{parameters.name}}
+  displayName: Loom tests
+  pool:
+    vmImage: ubuntu-16.04
+
+  steps:
+  - template: azure-install-rust.yml
+    parameters:
+      rust_version: ${{parameters.rust_version}}
+
+  - script: RUSTFLAGS="--cfg loom" cargo test --lib
+    displayName: RUSTFLAGS="--cfg loom" cargo test --lib
+
+
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/ci/azure-test-stable.yml
@@ -0,0 +1,50 @@
+parameters:
+  cmd: test
+  rust_version: stable
+  features: []
+
+jobs:
+- job: ${{ parameters.name }}
+  displayName: ${{ parameters.displayName }}
+  strategy:
+    matrix:
+      Linux:
+        vmImage: ubuntu-16.04
+
+      ${{ if parameters.cross }}:
+        MacOS:
+          vmImage: macOS-10.13
+        Windows:
+          vmImage: vs2017-win2016
+  pool:
+    vmImage: $(vmImage)
+
+  steps:
+  - template: azure-install-rust.yml
+    parameters:
+      rust_version: ${{parameters.rust_version}}
+
+  # Run with default crate features
+  - script: cargo ${{ parameters.cmd }}
+    displayName: cargo ${{ parameters.cmd }}
+
+  # Run with each specified feature
+  - ${{ each feature in parameters.features }}:
+    - script: cargo ${{ parameters.cmd }} --features ${{ feature }}
+      displayName: cargo ${{ parameters.cmd }} --features ${{ feature }}
+
+  - ${{ if eq(parameters.cmd, 'test') }}:
+    - script: cargo doc --no-deps
+      displayName: cargo doc --no-deps
+
+  - ${{ if parameters.benches }}:
+    - script: cargo check --benches
+      displayName: Check benchmarks
+
+  # Run with all features
+  - script: cargo ${{ parameters.cmd }} --all-features
+    displayName: cargo ${{ parameters.cmd }} --all-features
+
+  # Run with no default features
+  - script: cargo check --no-default-features
+    displayName: cargo check --no-default-features
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/ci/azure-tsan.yml
@@ -0,0 +1,26 @@
+jobs:
+- job: ${{ parameters.name }}
+  displayName: TSAN
+  pool:
+    vmImage: ubuntu-16.04
+
+  steps:
+  - template: azure-install-rust.yml
+    parameters:
+      rust_version: ${{ parameters.rust_version }}
+
+  - script: |
+      set -e
+
+      export RUST_TEST_THREADS=1
+      export ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0"
+      export TSAN_OPTIONS="suppressions=`pwd`/ci/tsan"
+
+      # Run address sanitizer
+      RUSTFLAGS="-Z sanitizer=address" \
+      cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut
+
+      # Run thread sanitizer
+      RUSTFLAGS="-Z sanitizer=thread" \
+      cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut
+    displayName: TSAN / MSAN
--- a/third_party/rust/bytes/ci/tsan
+++ b/third_party/rust/bytes/ci/tsan
@@ -4,18 +4,21 @@
 # This causes many false positives.
 race:Arc*drop
 race:arc*Weak*drop
 
 # `std` mpsc is not used in any Bytes code base. This race is triggered by some
 # rust runtime logic.
 race:std*mpsc_queue
 
+# Some test runtime races. Allocation should be race free
+race:alloc::alloc
+
 # Not sure why this is warning, but it is in the test harness and not the library.
 race:TestEvent*clone
 race:test::run_tests_console::*closure
 
 # Probably more fences in std.
 race:__call_tls_dtors
 
-# `is_inline_or_static` is explicitly called concurrently without synchronization.
-# The safety explanation can be found in a comment.
-race:Inner::is_inline_or_static
+# This ignores a false positive caused by `thread::park()`/`thread::unpark()`.
+# See: https://github.com/rust-lang/rust/pull/54806#issuecomment-436193353
+race:pthread_cond_destroy
copy from third_party/rust/bytes/src/buf/buf.rs
copy to third_party/rust/bytes/src/buf/buf_impl.rs
--- a/third_party/rust/bytes/src/buf/buf.rs
+++ b/third_party/rust/bytes/src/buf/buf_impl.rs
@@ -1,186 +1,192 @@
-use super::{IntoBuf, Take, Reader, Iter, FromBuf, Chain};
-use byteorder::{BigEndian, ByteOrder, LittleEndian};
-use iovec::IoVec;
+use core::{cmp, ptr, mem};
 
-use std::{cmp, io, ptr};
+#[cfg(feature = "std")]
+use std::io::IoSlice;
+
+use alloc::{boxed::Box};
 
 macro_rules! buf_get_impl {
-    ($this:ident, $size:expr, $conv:path) => ({
+    ($this:ident, $typ:tt::$conv:tt) => ({
+        const SIZE: usize = mem::size_of::<$typ>();
          // try to convert directly from the bytes
-        let ret = {
-            // this Option<ret> trick is to avoid keeping a borrow on self
-            // when advance() is called (mut borrow) and to call bytes() only once
-            if let Some(src) = $this.bytes().get(..($size)) {
-                Some($conv(src))
-            } else {
-                None
-            }
-        };
+         // this Option<ret> trick is to avoid keeping a borrow on self
+         // when advance() is called (mut borrow) and to call bytes() only once
+        let ret =  $this.bytes().get(..SIZE).map(|src| unsafe {
+            $typ::$conv(*(src as *const _ as *const [_; SIZE]))
+        });
+
         if let Some(ret) = ret {
-             // if the direct convertion was possible, advance and return
-            $this.advance($size);
+             // if the direct conversion was possible, advance and return
+            $this.advance(SIZE);
             return ret;
         } else {
             // if not we copy the bytes in a temp buffer then convert
-            let mut buf = [0; ($size)];
+            let mut buf = [0; SIZE];
             $this.copy_to_slice(&mut buf); // (do the advance)
-            return $conv(&buf);
+            return $typ::$conv(buf);
         }
     });
-    ($this:ident, $buf_size:expr, $conv:path, $len_to_read:expr) => ({
+    (le => $this:ident, $typ:tt, $len_to_read:expr) => ({
+        debug_assert!(mem::size_of::<$typ>() >= $len_to_read);
+
         // The same trick as above does not improve the best case speed.
         // It seems to be linked to the way the method is optimised by the compiler
-        let mut buf = [0; ($buf_size)];
+        let mut buf = [0; (mem::size_of::<$typ>())];
         $this.copy_to_slice(&mut buf[..($len_to_read)]);
-        return $conv(&buf[..($len_to_read)], $len_to_read);
+        return $typ::from_le_bytes(buf);
     });
+    (be => $this:ident, $typ:tt, $len_to_read:expr) => {{
+        debug_assert!(mem::size_of::<$typ>() >= $len_to_read);
+
+        let mut buf = [0; (mem::size_of::<$typ>())];
+        $this.copy_to_slice(&mut buf[mem::size_of::<$typ>()-($len_to_read)..]);
+        return $typ::from_be_bytes(buf);
+    }};
 }
 
 /// Read bytes from a buffer.
 ///
 /// A buffer stores bytes in memory such that read operations are infallible.
 /// The underlying storage may or may not be in contiguous memory. A `Buf` value
 /// is a cursor into the buffer. Reading from `Buf` advances the cursor
 /// position. It can be thought of as an efficient `Iterator` for collections of
 /// bytes.
 ///
-/// The simplest `Buf` is a `Cursor` wrapping a `[u8]`.
+/// The simplest `Buf` is a `&[u8]`.
 ///
 /// ```
 /// use bytes::Buf;
-/// use std::io::Cursor;
 ///
-/// let mut buf = Cursor::new(b"hello world");
+/// let mut buf = &b"hello world"[..];
 ///
 /// assert_eq!(b'h', buf.get_u8());
 /// assert_eq!(b'e', buf.get_u8());
 /// assert_eq!(b'l', buf.get_u8());
 ///
 /// let mut rest = [0; 8];
 /// buf.copy_to_slice(&mut rest);
 ///
-/// assert_eq!(&rest[..], b"lo world");
+/// assert_eq!(&rest[..], &b"lo world"[..]);
 /// ```
 pub trait Buf {
     /// Returns the number of bytes between the current position and the end of
     /// the buffer.
     ///
     /// This value is greater than or equal to the length of the slice returned
     /// by `bytes`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"hello world");
+    /// let mut buf = &b"hello world"[..];
     ///
     /// assert_eq!(buf.remaining(), 11);
     ///
     /// buf.get_u8();
     ///
     /// assert_eq!(buf.remaining(), 10);
     /// ```
     ///
     /// # Implementer notes
     ///
     /// Implementations of `remaining` should ensure that the return value does
     /// not change unless a call is made to `advance` or any other function that
     /// is documented to change the `Buf`'s current position.
     fn remaining(&self) -> usize;
 
     /// Returns a slice starting at the current position and of length between 0
-    /// and `Buf::remaining()`.
+    /// and `Buf::remaining()`. Note that this *can* return shorter slice (this allows
+    /// non-continuous internal representation).
     ///
     /// This is a lower level function. Most operations are done with other
     /// functions.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"hello world");
+    /// let mut buf = &b"hello world"[..];
     ///
-    /// assert_eq!(buf.bytes(), b"hello world");
+    /// assert_eq!(buf.bytes(), &b"hello world"[..]);
     ///
     /// buf.advance(6);
     ///
-    /// assert_eq!(buf.bytes(), b"world");
+    /// assert_eq!(buf.bytes(), &b"world"[..]);
     /// ```
     ///
     /// # Implementer notes
     ///
     /// This function should never panic. Once the end of the buffer is reached,
     /// i.e., `Buf::remaining` returns 0, calls to `bytes` should return an
     /// empty slice.
     fn bytes(&self) -> &[u8];
 
     /// Fills `dst` with potentially multiple slices starting at `self`'s
     /// current position.
     ///
-    /// If the `Buf` is backed by disjoint slices of bytes, `bytes_vec` enables
-    /// fetching more than one slice at once. `dst` is a slice of `IoVec`
+    /// If the `Buf` is backed by disjoint slices of bytes, `bytes_vectored` enables
+    /// fetching more than one slice at once. `dst` is a slice of `IoSlice`
     /// references, enabling the slice to be directly used with [`writev`]
     /// without any further conversion. The sum of the lengths of all the
     /// buffers in `dst` will be less than or equal to `Buf::remaining()`.
     ///
     /// The entries in `dst` will be overwritten, but the data **contained** by
-    /// the slices **will not** be modified. If `bytes_vec` does not fill every
+    /// the slices **will not** be modified. If `bytes_vectored` does not fill every
     /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices
     /// in `self.
     ///
     /// This is a lower level function. Most operations are done with other
     /// functions.
     ///
     /// # Implementer notes
     ///
     /// This function should never panic. Once the end of the buffer is reached,
-    /// i.e., `Buf::remaining` returns 0, calls to `bytes_vec` must return 0
+    /// i.e., `Buf::remaining` returns 0, calls to `bytes_vectored` must return 0
     /// without mutating `dst`.
     ///
     /// Implementations should also take care to properly handle being called
     /// with `dst` being a zero length slice.
     ///
     /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html
-    fn bytes_vec<'a>(&'a self, dst: &mut [&'a IoVec]) -> usize {
+    #[cfg(feature = "std")]
+    fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
         if dst.is_empty() {
             return 0;
         }
 
         if self.has_remaining() {
-            dst[0] = self.bytes().into();
+            dst[0] = IoSlice::new(self.bytes());
             1
         } else {
             0
         }
     }
 
     /// Advance the internal cursor of the Buf
     ///
     /// The next call to `bytes` will return a slice starting `cnt` bytes
     /// further into the underlying buffer.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"hello world");
+    /// let mut buf = &b"hello world"[..];
     ///
-    /// assert_eq!(buf.bytes(), b"hello world");
+    /// assert_eq!(buf.bytes(), &b"hello world"[..]);
     ///
     /// buf.advance(6);
     ///
-    /// assert_eq!(buf.bytes(), b"world");
+    /// assert_eq!(buf.bytes(), &b"world"[..]);
     /// ```
     ///
     /// # Panics
     ///
     /// This function **may** panic if `cnt > self.remaining()`.
     ///
     /// # Implementer notes
     ///
@@ -194,19 +200,18 @@ pub trait Buf {
     /// Returns true if there are any more bytes to consume
     ///
     /// This is equivalent to `self.remaining() != 0`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"a");
+    /// let mut buf = &b"a"[..];
     ///
     /// assert!(buf.has_remaining());
     ///
     /// buf.get_u8();
     ///
     /// assert!(!buf.has_remaining());
     /// ```
     fn has_remaining(&self) -> bool {
@@ -217,23 +222,22 @@ pub trait Buf {
     ///
     /// The cursor is advanced by the number of bytes copied. `self` must have
     /// enough remaining bytes to fill `dst`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"hello world");
+    /// let mut buf = &b"hello world"[..];
     /// let mut dst = [0; 5];
     ///
     /// buf.copy_to_slice(&mut dst);
-    /// assert_eq!(b"hello", &dst);
+    /// assert_eq!(&b"hello"[..], &dst);
     /// assert_eq!(6, buf.remaining());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if `self.remaining() < dst.len()`
     fn copy_to_slice(&mut self, dst: &mut [u8]) {
         let mut off = 0;
@@ -245,34 +249,33 @@ pub trait Buf {
 
             unsafe {
                 let src = self.bytes();
                 cnt = cmp::min(src.len(), dst.len() - off);
 
                 ptr::copy_nonoverlapping(
                     src.as_ptr(), dst[off..].as_mut_ptr(), cnt);
 
-                off += src.len();
+                off += cnt;
             }
 
             self.advance(cnt);
         }
     }
 
     /// Gets an unsigned 8 bit integer from `self`.
     ///
     /// The current position is advanced by 1.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x08 hello");
+    /// let mut buf = &b"\x08 hello"[..];
     /// assert_eq!(8, buf.get_u8());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is no more remaining data in `self`.
     fn get_u8(&mut self) -> u8 {
         assert!(self.remaining() >= 1);
@@ -284,844 +287,661 @@ pub trait Buf {
     /// Gets a signed 8 bit integer from `self`.
     ///
     /// The current position is advanced by 1.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x08 hello");
+    /// let mut buf = &b"\x08 hello"[..];
     /// assert_eq!(8, buf.get_i8());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is no more remaining data in `self`.
     fn get_i8(&mut self) -> i8 {
         assert!(self.remaining() >= 1);
         let ret = self.bytes()[0] as i8;
         self.advance(1);
         ret
     }
 
-    #[doc(hidden)]
-    #[deprecated(note="use get_u16_be or get_u16_le")]
-    fn get_u16<T: ByteOrder>(&mut self) -> u16 where Self: Sized {
-        let mut buf = [0; 2];
-        self.copy_to_slice(&mut buf);
-        T::read_u16(&buf)
-    }
-
     /// Gets an unsigned 16 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 2.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x08\x09 hello");
-    /// assert_eq!(0x0809, buf.get_u16_be());
+    /// let mut buf = &b"\x08\x09 hello"[..];
+    /// assert_eq!(0x0809, buf.get_u16());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_u16_be(&mut self) -> u16 {
-        buf_get_impl!(self, 2, BigEndian::read_u16);
+    fn get_u16(&mut self) -> u16 {
+        buf_get_impl!(self, u16::from_be_bytes);
     }
 
     /// Gets an unsigned 16 bit integer from `self` in little-endian byte order.
     ///
     /// The current position is advanced by 2.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x09\x08 hello");
+    /// let mut buf = &b"\x09\x08 hello"[..];
     /// assert_eq!(0x0809, buf.get_u16_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_u16_le(&mut self) -> u16 {
-        buf_get_impl!(self, 2, LittleEndian::read_u16);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_i16_be or get_i16_le")]
-    fn get_i16<T: ByteOrder>(&mut self) -> i16 where Self: Sized {
-        let mut buf = [0; 2];
-        self.copy_to_slice(&mut buf);
-        T::read_i16(&buf)
+        buf_get_impl!(self, u16::from_le_bytes);
     }
 
     /// Gets a signed 16 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 2.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x08\x09 hello");
-    /// assert_eq!(0x0809, buf.get_i16_be());
+    /// let mut buf = &b"\x08\x09 hello"[..];
+    /// assert_eq!(0x0809, buf.get_i16());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_i16_be(&mut self) -> i16 {
-        buf_get_impl!(self, 2, BigEndian::read_i16);
+    fn get_i16(&mut self) -> i16 {
+        buf_get_impl!(self, i16::from_be_bytes);
     }
 
     /// Gets a signed 16 bit integer from `self` in little-endian byte order.
     ///
     /// The current position is advanced by 2.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x09\x08 hello");
+    /// let mut buf = &b"\x09\x08 hello"[..];
     /// assert_eq!(0x0809, buf.get_i16_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_i16_le(&mut self) -> i16 {
-        buf_get_impl!(self, 2, LittleEndian::read_i16);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_u32_be or get_u32_le")]
-    fn get_u32<T: ByteOrder>(&mut self) -> u32 where Self: Sized {
-        let mut buf = [0; 4];
-        self.copy_to_slice(&mut buf);
-        T::read_u32(&buf)
+        buf_get_impl!(self, i16::from_le_bytes);
     }
 
     /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello");
-    /// assert_eq!(0x0809A0A1, buf.get_u32_be());
+    /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+    /// assert_eq!(0x0809A0A1, buf.get_u32());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_u32_be(&mut self) -> u32 {
-        buf_get_impl!(self, 4, BigEndian::read_u32);
+    fn get_u32(&mut self) -> u32 {
+        buf_get_impl!(self, u32::from_be_bytes);
     }
 
     /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello");
+    /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
     /// assert_eq!(0x0809A0A1, buf.get_u32_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_u32_le(&mut self) -> u32 {
-        buf_get_impl!(self, 4, LittleEndian::read_u32);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_i32_be or get_i32_le")]
-    fn get_i32<T: ByteOrder>(&mut self) -> i32 where Self: Sized {
-        let mut buf = [0; 4];
-        self.copy_to_slice(&mut buf);
-        T::read_i32(&buf)
+        buf_get_impl!(self, u32::from_le_bytes);
     }
 
     /// Gets a signed 32 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello");
-    /// assert_eq!(0x0809A0A1, buf.get_i32_be());
+    /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..];
+    /// assert_eq!(0x0809A0A1, buf.get_i32());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_i32_be(&mut self) -> i32 {
-        buf_get_impl!(self, 4, BigEndian::read_i32);
+    fn get_i32(&mut self) -> i32 {
+        buf_get_impl!(self, i32::from_be_bytes);
     }
 
     /// Gets a signed 32 bit integer from `self` in little-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello");
+    /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..];
     /// assert_eq!(0x0809A0A1, buf.get_i32_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_i32_le(&mut self) -> i32 {
-        buf_get_impl!(self, 4, LittleEndian::read_i32);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_u64_be or get_u64_le")]
-    fn get_u64<T: ByteOrder>(&mut self) -> u64 where Self: Sized {
-        let mut buf = [0; 8];
-        self.copy_to_slice(&mut buf);
-        T::read_u64(&buf)
+        buf_get_impl!(self, i32::from_le_bytes);
     }
 
     /// Gets an unsigned 64 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello");
-    /// assert_eq!(0x0102030405060708, buf.get_u64_be());
+    /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+    /// assert_eq!(0x0102030405060708, buf.get_u64());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_u64_be(&mut self) -> u64 {
-        buf_get_impl!(self, 8, BigEndian::read_u64);
+    fn get_u64(&mut self) -> u64 {
+        buf_get_impl!(self, u64::from_be_bytes);
     }
 
     /// Gets an unsigned 64 bit integer from `self` in little-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello");
+    /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
     /// assert_eq!(0x0102030405060708, buf.get_u64_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_u64_le(&mut self) -> u64 {
-        buf_get_impl!(self, 8, LittleEndian::read_u64);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_i64_be or get_i64_le")]
-    fn get_i64<T: ByteOrder>(&mut self) -> i64 where Self: Sized {
-        let mut buf = [0; 8];
-        self.copy_to_slice(&mut buf);
-        T::read_i64(&buf)
+        buf_get_impl!(self, u64::from_le_bytes);
     }
 
     /// Gets a signed 64 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello");
-    /// assert_eq!(0x0102030405060708, buf.get_i64_be());
+    /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..];
+    /// assert_eq!(0x0102030405060708, buf.get_i64());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_i64_be(&mut self) -> i64 {
-        buf_get_impl!(self, 8, BigEndian::read_i64);
+    fn get_i64(&mut self) -> i64 {
+        buf_get_impl!(self, i64::from_be_bytes);
     }
 
     /// Gets a signed 64 bit integer from `self` in little-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello");
+    /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
     /// assert_eq!(0x0102030405060708, buf.get_i64_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_i64_le(&mut self) -> i64 {
-        buf_get_impl!(self, 8, LittleEndian::read_i64);
+        buf_get_impl!(self, i64::from_le_bytes);
     }
 
     /// Gets an unsigned 128 bit integer from `self` in big-endian byte order.
     ///
-    /// **NOTE:** This method requires the `i128` feature.
     /// The current position is advanced by 16.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello");
-    /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_be());
+    /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    #[cfg(feature = "i128")]
-    fn get_u128_be(&mut self) -> u128 {
-        buf_get_impl!(self, 16, BigEndian::read_u128);
+    fn get_u128(&mut self) -> u128 {
+        buf_get_impl!(self, u128::from_be_bytes);
     }
 
     /// Gets an unsigned 128 bit integer from `self` in little-endian byte order.
     ///
-    /// **NOTE:** This method requires the `i128` feature.
     /// The current position is advanced by 16.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello");
+    /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
     /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    #[cfg(feature = "i128")]
     fn get_u128_le(&mut self) -> u128 {
-        buf_get_impl!(self, 16, LittleEndian::read_u128);
+        buf_get_impl!(self, u128::from_le_bytes);
     }
 
     /// Gets a signed 128 bit integer from `self` in big-endian byte order.
     ///
-    /// **NOTE:** This method requires the `i128` feature.
     /// The current position is advanced by 16.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello");
-    /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_be());
+    /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..];
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    #[cfg(feature = "i128")]
-    fn get_i128_be(&mut self) -> i128 {
-        buf_get_impl!(self, 16, BigEndian::read_i128);
+    fn get_i128(&mut self) -> i128 {
+        buf_get_impl!(self, i128::from_be_bytes);
     }
 
     /// Gets a signed 128 bit integer from `self` in little-endian byte order.
     ///
-    /// **NOTE:** This method requires the `i128` feature.
     /// The current position is advanced by 16.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello");
+    /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..];
     /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    #[cfg(feature = "i128")]
     fn get_i128_le(&mut self) -> i128 {
-        buf_get_impl!(self, 16, LittleEndian::read_i128);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_uint_be or get_uint_le")]
-    fn get_uint<T: ByteOrder>(&mut self, nbytes: usize) -> u64 where Self: Sized {
-        let mut buf = [0; 8];
-        self.copy_to_slice(&mut buf[..nbytes]);
-        T::read_uint(&buf[..nbytes], nbytes)
+        buf_get_impl!(self, i128::from_le_bytes);
     }
 
     /// Gets an unsigned n-byte integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
     ///
     /// # Examples
     ///
     /// ```
-    /// use bytes::{Buf, BigEndian};
-    /// use std::io::Cursor;
+    /// use bytes::Buf;
     ///
-    /// let mut buf = Cursor::new(b"\x01\x02\x03 hello");
-    /// assert_eq!(0x010203, buf.get_uint_be(3));
+    /// let mut buf = &b"\x01\x02\x03 hello"[..];
+    /// assert_eq!(0x010203, buf.get_uint(3));
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_uint_be(&mut self, nbytes: usize) -> u64 {
-        buf_get_impl!(self, 8, BigEndian::read_uint, nbytes);
+    fn get_uint(&mut self, nbytes: usize) -> u64 {
+        buf_get_impl!(be => self, u64, nbytes);
     }
 
     /// Gets an unsigned n-byte integer from `self` in little-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x03\x02\x01 hello");
+    /// let mut buf = &b"\x03\x02\x01 hello"[..];
     /// assert_eq!(0x010203, buf.get_uint_le(3));
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_uint_le(&mut self, nbytes: usize) -> u64 {
-        buf_get_impl!(self, 8, LittleEndian::read_uint, nbytes);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_int_be or get_int_le")]
-    fn get_int<T: ByteOrder>(&mut self, nbytes: usize) -> i64 where Self: Sized {
-        let mut buf = [0; 8];
-        self.copy_to_slice(&mut buf[..nbytes]);
-        T::read_int(&buf[..nbytes], nbytes)
+        buf_get_impl!(le => self, u64, nbytes);
     }
 
     /// Gets a signed n-byte integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x01\x02\x03 hello");
-    /// assert_eq!(0x010203, buf.get_int_be(3));
+    /// let mut buf = &b"\x01\x02\x03 hello"[..];
+    /// assert_eq!(0x010203, buf.get_int(3));
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_int_be(&mut self, nbytes: usize) -> i64 {
-        buf_get_impl!(self, 8, BigEndian::read_int, nbytes);
+    fn get_int(&mut self, nbytes: usize) -> i64 {
+        buf_get_impl!(be => self, i64, nbytes);
     }
 
     /// Gets a signed n-byte integer from `self` in little-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x03\x02\x01 hello");
+    /// let mut buf = &b"\x03\x02\x01 hello"[..];
     /// assert_eq!(0x010203, buf.get_int_le(3));
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_int_le(&mut self, nbytes: usize) -> i64 {
-        buf_get_impl!(self, 8, LittleEndian::read_int, nbytes);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_f32_be or get_f32_le")]
-    fn get_f32<T: ByteOrder>(&mut self) -> f32 where Self: Sized {
-        let mut buf = [0; 4];
-        self.copy_to_slice(&mut buf);
-        T::read_f32(&buf)
+        buf_get_impl!(le => self, i64, nbytes);
     }
 
     /// Gets an IEEE754 single-precision (4 bytes) floating point number from
     /// `self` in big-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x3F\x99\x99\x9A hello");
-    /// assert_eq!(1.2f32, buf.get_f32_be());
+    /// let mut buf = &b"\x3F\x99\x99\x9A hello"[..];
+    /// assert_eq!(1.2f32, buf.get_f32());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_f32_be(&mut self) -> f32 {
-        buf_get_impl!(self, 4, BigEndian::read_f32);
+    fn get_f32(&mut self) -> f32 {
+        f32::from_bits(Self::get_u32(self))
     }
 
     /// Gets an IEEE754 single-precision (4 bytes) floating point number from
     /// `self` in little-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x9A\x99\x99\x3F hello");
+    /// let mut buf = &b"\x9A\x99\x99\x3F hello"[..];
     /// assert_eq!(1.2f32, buf.get_f32_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_f32_le(&mut self) -> f32 {
-        buf_get_impl!(self, 4, LittleEndian::read_f32);
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use get_f64_be or get_f64_le")]
-    fn get_f64<T: ByteOrder>(&mut self) -> f64 where Self: Sized {
-        let mut buf = [0; 8];
-        self.copy_to_slice(&mut buf);
-        T::read_f64(&buf)
+        f32::from_bits(Self::get_u32_le(self))
     }
 
     /// Gets an IEEE754 double-precision (8 bytes) floating point number from
     /// `self` in big-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello");
-    /// assert_eq!(1.2f64, buf.get_f64_be());
+    /// let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..];
+    /// assert_eq!(1.2f64, buf.get_f64());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
-    fn get_f64_be(&mut self) -> f64 {
-        buf_get_impl!(self, 8, BigEndian::read_f64);
+    fn get_f64(&mut self) -> f64 {
+        f64::from_bits(Self::get_u64(self))
     }
 
     /// Gets an IEEE754 double-precision (8 bytes) floating point number from
     /// `self` in little-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Buf;
-    /// use std::io::Cursor;
     ///
-    /// let mut buf = Cursor::new(b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello");
+    /// let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..];
     /// assert_eq!(1.2f64, buf.get_f64_le());
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining data in `self`.
     fn get_f64_le(&mut self) -> f64 {
-        buf_get_impl!(self, 8, LittleEndian::read_f64);
-    }
-
-    /// Transforms a `Buf` into a concrete buffer.
-    ///
-    /// `collect()` can operate on any value that implements `Buf`, and turn it
-    /// into the relevent concrete buffer type.
-    ///
-    /// # Examples
-    ///
-    /// Collecting a buffer and loading the contents into a `Vec<u8>`.
-    ///
-    /// ```
-    /// use bytes::{Buf, Bytes, IntoBuf};
-    ///
-    /// let buf = Bytes::from(&b"hello world"[..]).into_buf();
-    /// let vec: Vec<u8> = buf.collect();
-    ///
-    /// assert_eq!(vec, &b"hello world"[..]);
-    /// ```
-    fn collect<B>(self) -> B
-        where Self: Sized,
-              B: FromBuf,
-    {
-        B::from_buf(self)
+        f64::from_bits(Self::get_u64_le(self))
     }
 
-    /// Creates an adaptor which will read at most `limit` bytes from `self`.
-    ///
-    /// This function returns a new instance of `Buf` which will read at most
-    /// `limit` bytes.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{Buf, BufMut};
-    /// use std::io::Cursor;
-    ///
-    /// let mut buf = Cursor::new("hello world").take(5);
-    /// let mut dst = vec![];
-    ///
-    /// dst.put(&mut buf);
-    /// assert_eq!(dst, b"hello");
-    ///
-    /// let mut buf = buf.into_inner();
-    /// dst.clear();
-    /// dst.put(&mut buf);
-    /// assert_eq!(dst, b" world");
-    /// ```
-    fn take(self, limit: usize) -> Take<Self>
-        where Self: Sized
-    {
-        super::take::new(self, limit)
-    }
-
-    /// Creates an adaptor which will chain this buffer with another.
-    ///
-    /// The returned `Buf` instance will first consume all bytes from `self`.
-    /// Afterwards the output is equivalent to the output of next.
+    /// Consumes remaining bytes inside self and returns new instance of `Bytes`
     ///
     /// # Examples
     ///
     /// ```
-    /// use bytes::{Bytes, Buf, IntoBuf};
-    /// use bytes::buf::Chain;
-    ///
-    /// let buf = Bytes::from(&b"hello "[..]).into_buf()
-    ///             .chain(Bytes::from(&b"world"[..]));
-    ///
-    /// let full: Bytes = buf.collect();
-    /// assert_eq!(full[..], b"hello world"[..]);
-    /// ```
-    fn chain<U>(self, next: U) -> Chain<Self, U::Buf>
-        where U: IntoBuf,
-              Self: Sized,
-    {
-        Chain::new(self, next.into_buf())
-    }
-
-    /// Creates a "by reference" adaptor for this instance of `Buf`.
+    /// use bytes::Buf;
     ///
-    /// The returned adaptor also implements `Buf` and will simply borrow `self`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{Buf, BufMut};
-    /// use std::io::Cursor;
-    ///
-    /// let mut buf = Cursor::new("hello world");
-    /// let mut dst = vec![];
-    ///
-    /// {
-    ///     let mut reference = buf.by_ref();
-    ///     dst.put(&mut reference.take(5));
-    ///     assert_eq!(dst, b"hello");
-    /// } // drop our &mut reference so we can use `buf` again
-    ///
-    /// dst.clear();
-    /// dst.put(&mut buf);
-    /// assert_eq!(dst, b" world");
+    /// let bytes = (&b"hello world"[..]).to_bytes();
+    /// assert_eq!(&bytes[..], &b"hello world"[..]);
     /// ```
-    fn by_ref(&mut self) -> &mut Self where Self: Sized {
-        self
-    }
-
-    /// Creates an adaptor which implements the `Read` trait for `self`.
-    ///
-    /// This function returns a new value which implements `Read` by adapting
-    /// the `Read` trait functions to the `Buf` trait functions. Given that
-    /// `Buf` operations are infallible, none of the `Read` functions will
-    /// return with `Err`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{Buf, IntoBuf, Bytes};
-    /// use std::io::Read;
-    ///
-    /// let buf = Bytes::from("hello world").into_buf();
-    ///
-    /// let mut reader = buf.reader();
-    /// let mut dst = [0; 1024];
-    ///
-    /// let num = reader.read(&mut dst).unwrap();
-    ///
-    /// assert_eq!(11, num);
-    /// assert_eq!(&dst[..11], b"hello world");
-    /// ```
-    fn reader(self) -> Reader<Self> where Self: Sized {
-        super::reader::new(self)
-    }
-
-    /// Returns an iterator over the bytes contained by the buffer.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{Buf, IntoBuf, Bytes};
-    ///
-    /// let buf = Bytes::from(&b"abc"[..]).into_buf();
-    /// let mut iter = buf.iter();
-    ///
-    /// assert_eq!(iter.next(), Some(b'a'));
-    /// assert_eq!(iter.next(), Some(b'b'));
-    /// assert_eq!(iter.next(), Some(b'c'));
-    /// assert_eq!(iter.next(), None);
-    /// ```
-    fn iter(self) -> Iter<Self> where Self: Sized {
-        super::iter::new(self)
+    fn to_bytes(&mut self) -> crate::Bytes {
+        use super::BufMut;
+        let mut ret = crate::BytesMut::with_capacity(self.remaining());
+        ret.put(self);
+        ret.freeze()
     }
 }
 
-impl<'a, T: Buf + ?Sized> Buf for &'a mut T {
+macro_rules! deref_forward_buf {
+    () => (
     fn remaining(&self) -> usize {
         (**self).remaining()
     }
 
     fn bytes(&self) -> &[u8] {
         (**self).bytes()
     }
 
-    fn bytes_vec<'b>(&'b self, dst: &mut [&'b IoVec]) -> usize {
-        (**self).bytes_vec(dst)
-    }
-
-    fn advance(&mut self, cnt: usize) {
-        (**self).advance(cnt)
-    }
-}
-
-impl<T: Buf + ?Sized> Buf for Box<T> {
-    fn remaining(&self) -> usize {
-        (**self).remaining()
-    }
-
-    fn bytes(&self) -> &[u8] {
-        (**self).bytes()
-    }
-
-    fn bytes_vec<'b>(&'b self, dst: &mut [&'b IoVec]) -> usize {
-        (**self).bytes_vec(dst)
+    #[cfg(feature = "std")]
+    fn bytes_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize {
+        (**self).bytes_vectored(dst)
     }
 
     fn advance(&mut self, cnt: usize) {
         (**self).advance(cnt)
     }
-}
+
+    fn has_remaining(&self) -> bool {
+        (**self).has_remaining()
+    }
+
+    fn copy_to_slice(&mut self, dst: &mut [u8]) {
+        (**self).copy_to_slice(dst)
+    }
+
+    fn get_u8(&mut self) -> u8 {
+        (**self).get_u8()
+    }
+
+    fn get_i8(&mut self) -> i8 {
+        (**self).get_i8()
+    }
+
+    fn get_u16(&mut self) -> u16 {
+        (**self).get_u16()
+    }
+
+    fn get_u16_le(&mut self) -> u16 {
+        (**self).get_u16_le()
+    }
 
-impl<T: AsRef<[u8]>> Buf for io::Cursor<T> {
-    fn remaining(&self) -> usize {
-        let len = self.get_ref().as_ref().len();
-        let pos = self.position();
+    fn get_i16(&mut self) -> i16 {
+        (**self).get_i16()
+    }
+
+    fn get_i16_le(&mut self) -> i16 {
+        (**self).get_i16_le()
+    }
+
+    fn get_u32(&mut self) -> u32 {
+        (**self).get_u32()
+    }
 
-        if pos >= len as u64 {
-            return 0;
-        }
+    fn get_u32_le(&mut self) -> u32 {
+        (**self).get_u32_le()
+    }
 
-        len - pos as usize
+    fn get_i32(&mut self) -> i32 {
+        (**self).get_i32()
+    }
+
+    fn get_i32_le(&mut self) -> i32 {
+        (**self).get_i32_le()
+    }
+
+    fn get_u64(&mut self) -> u64 {
+        (**self).get_u64()
     }
 
-    fn bytes(&self) -> &[u8] {
-        let len = self.get_ref().as_ref().len();
-        let pos = self.position() as usize;
+    fn get_u64_le(&mut self) -> u64 {
+        (**self).get_u64_le()
+    }
+
+    fn get_i64(&mut self) -> i64 {
+        (**self).get_i64()
+    }
+
+    fn get_i64_le(&mut self) -> i64 {
+        (**self).get_i64_le()
+    }
 
-        if pos >= len {
-            return Default::default();
-        }
+    fn get_uint(&mut self, nbytes: usize) -> u64 {
+        (**self).get_uint(nbytes)
+    }
 
-        &(self.get_ref().as_ref())[pos..]
+    fn get_uint_le(&mut self, nbytes: usize) -> u64 {
+        (**self).get_uint_le(nbytes)
+    }
+
+    fn get_int(&mut self, nbytes: usize) -> i64 {
+        (**self).get_int(nbytes)
+    }
+
+    fn get_int_le(&mut self, nbytes: usize) -> i64 {
+        (**self).get_int_le(nbytes)
     }
 
-    fn advance(&mut self, cnt: usize) {
-        let pos = (self.position() as usize)
-            .checked_add(cnt).expect("overflow");
+    fn to_bytes(&mut self) -> crate::Bytes {
+        (**self).to_bytes()
+    }
+
+    )
+}
+
+impl<T: Buf + ?Sized> Buf for &mut T {
+    deref_forward_buf!();
+}
+
+impl<T: Buf + ?Sized> Buf for Box<T> {
+    deref_forward_buf!();
+}
 
-        assert!(pos <= self.get_ref().as_ref().len());
+impl Buf for &[u8] {
+    #[inline]
+    fn remaining(&self) -> usize {
+        self.len()
+    }
 
-        self.set_position(pos as u64);
+    #[inline]
+    fn bytes(&self) -> &[u8] {
+        self
+    }
+
+    #[inline]
+    fn advance(&mut self, cnt: usize) {
+        *self = &self[cnt..];
     }
 }
 
 impl Buf for Option<[u8; 1]> {
     fn remaining(&self) -> usize {
         if self.is_some() {
             1
         } else {
@@ -1143,11 +963,44 @@ impl Buf for Option<[u8; 1]> {
             panic!("overflow");
         } else {
             assert_eq!(1, cnt);
             *self = None;
         }
     }
 }
 
-// The existance of this function makes the compiler catch if the Buf
+#[cfg(feature = "std")]
+impl<T: AsRef<[u8]>> Buf for std::io::Cursor<T> {
+    fn remaining(&self) -> usize {
+        let len = self.get_ref().as_ref().len();
+        let pos = self.position();
+
+        if pos >= len as u64 {
+            return 0;
+        }
+
+        len - pos as usize
+    }
+
+    fn bytes(&self) -> &[u8] {
+        let len = self.get_ref().as_ref().len();
+        let pos = self.position();
+
+        if pos >= len as u64 {
+            return &[];
+        }
+
+        &self.get_ref().as_ref()[pos as usize..]
+    }
+
+    fn advance(&mut self, cnt: usize) {
+        let pos = (self.position() as usize)
+            .checked_add(cnt).expect("overflow");
+
+        assert!(pos <= self.get_ref().as_ref().len());
+        self.set_position(pos as u64);
+    }
+}
+
+// The existence of this function makes the compiler catch if the Buf
 // trait is "object-safe" or not.
-fn _assert_trait_object(_b: &Buf) {}
+fn _assert_trait_object(_b: &dyn Buf) {}
--- a/third_party/rust/bytes/src/buf/buf_mut.rs
+++ b/third_party/rust/bytes/src/buf/buf_mut.rs
@@ -1,13 +1,14 @@
-use super::{IntoBuf, Writer};
-use byteorder::{LittleEndian, ByteOrder, BigEndian};
-use iovec::IoVec;
+use core::{cmp, mem::{self, MaybeUninit}, ptr, usize};
 
-use std::{cmp, io, ptr, usize};
+#[cfg(feature = "std")]
+use std::fmt;
+
+use alloc::{vec::Vec, boxed::Box};
 
 /// A trait for values that provide sequential write access to bytes.
 ///
 /// Write bytes to a buffer
 ///
 /// A buffer stores bytes in memory such that write operations are infallible.
 /// The underlying storage may or may not be in contiguous memory. A `BufMut`
 /// value is a cursor into the buffer. Writing to `BufMut` advances the cursor
@@ -15,40 +16,39 @@ use std::{cmp, io, ptr, usize};
 ///
 /// The simplest `BufMut` is a `Vec<u8>`.
 ///
 /// ```
 /// use bytes::BufMut;
 ///
 /// let mut buf = vec![];
 ///
-/// buf.put("hello world");
+/// buf.put(&b"hello world"[..]);
 ///
 /// assert_eq!(buf, b"hello world");
 /// ```
 pub trait BufMut {
     /// Returns the number of bytes that can be written from the current
     /// position until the end of the buffer is reached.
     ///
     /// This value is greater than or equal to the length of the slice returned
     /// by `bytes_mut`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
-    /// use std::io::Cursor;
     ///
     /// let mut dst = [0; 10];
-    /// let mut buf = Cursor::new(&mut dst[..]);
+    /// let mut buf = &mut dst[..];
     ///
-    /// assert_eq!(10, buf.remaining_mut());
-    /// buf.put("hello");
+    /// let original_remaining = buf.remaining_mut();
+    /// buf.put(&b"hello"[..]);
     ///
-    /// assert_eq!(5, buf.remaining_mut());
+    /// assert_eq!(original_remaining - 5, buf.remaining_mut());
     /// ```
     ///
     /// # Implementer notes
     ///
     /// Implementations of `remaining_mut` should ensure that the return value
     /// does not change unless a call is made to `advance_mut` or any other
     /// function that is documented to change the `BufMut`'s current position.
     fn remaining_mut(&self) -> usize;
@@ -64,23 +64,25 @@ pub trait BufMut {
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = Vec::with_capacity(16);
     ///
     /// unsafe {
-    ///     buf.bytes_mut()[0] = b'h';
-    ///     buf.bytes_mut()[1] = b'e';
+    ///     // MaybeUninit::as_mut_ptr
+    ///     buf.bytes_mut()[0].as_mut_ptr().write(b'h');
+    ///     buf.bytes_mut()[1].as_mut_ptr().write(b'e');
     ///
     ///     buf.advance_mut(2);
     ///
-    ///     buf.bytes_mut()[0] = b'l';
-    ///     buf.bytes_mut()[1..3].copy_from_slice(b"lo");
+    ///     buf.bytes_mut()[0].as_mut_ptr().write(b'l');
+    ///     buf.bytes_mut()[1].as_mut_ptr().write(b'l');
+    ///     buf.bytes_mut()[2].as_mut_ptr().write(b'o');
     ///
     ///     buf.advance_mut(3);
     /// }
     ///
     /// assert_eq!(5, buf.len());
     /// assert_eq!(buf, b"hello");
     /// ```
     ///
@@ -100,193 +102,191 @@ pub trait BufMut {
     /// Returns true if there is space in `self` for more bytes.
     ///
     /// This is equivalent to `self.remaining_mut() != 0`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
-    /// use std::io::Cursor;
     ///
     /// let mut dst = [0; 5];
-    /// let mut buf = Cursor::new(&mut dst);
+    /// let mut buf = &mut dst[..];
     ///
     /// assert!(buf.has_remaining_mut());
     ///
-    /// buf.put("hello");
+    /// buf.put(&b"hello"[..]);
     ///
     /// assert!(!buf.has_remaining_mut());
     /// ```
     fn has_remaining_mut(&self) -> bool {
         self.remaining_mut() > 0
     }
 
     /// Returns a mutable slice starting at the current BufMut position and of
-    /// length between 0 and `BufMut::remaining_mut()`.
+    /// length between 0 and `BufMut::remaining_mut()`. Note that this *can* be shorter than the
+    /// whole remainder of the buffer (this allows non-continuous implementation).
     ///
     /// This is a lower level function. Most operations are done with other
     /// functions.
     ///
     /// The returned byte slice may represent uninitialized memory.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = Vec::with_capacity(16);
     ///
     /// unsafe {
-    ///     buf.bytes_mut()[0] = b'h';
-    ///     buf.bytes_mut()[1] = b'e';
+    ///     // MaybeUninit::as_mut_ptr
+    ///     buf.bytes_mut()[0].as_mut_ptr().write(b'h');
+    ///     buf.bytes_mut()[1].as_mut_ptr().write(b'e');
     ///
     ///     buf.advance_mut(2);
     ///
-    ///     buf.bytes_mut()[0] = b'l';
-    ///     buf.bytes_mut()[1..3].copy_from_slice(b"lo");
+    ///     buf.bytes_mut()[0].as_mut_ptr().write(b'l');
+    ///     buf.bytes_mut()[1].as_mut_ptr().write(b'l');
+    ///     buf.bytes_mut()[2].as_mut_ptr().write(b'o');
     ///
     ///     buf.advance_mut(3);
     /// }
     ///
     /// assert_eq!(5, buf.len());
     /// assert_eq!(buf, b"hello");
     /// ```
     ///
     /// # Implementer notes
     ///
     /// This function should never panic. `bytes_mut` should return an empty
     /// slice **if and only if** `remaining_mut` returns 0. In other words,
     /// `bytes_mut` returning an empty slice implies that `remaining_mut` will
     /// return 0 and `remaining_mut` returning 0 implies that `bytes_mut` will
     /// return an empty slice.
-    unsafe fn bytes_mut(&mut self) -> &mut [u8];
+    fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>];
 
     /// Fills `dst` with potentially multiple mutable slices starting at `self`'s
     /// current position.
     ///
-    /// If the `BufMut` is backed by disjoint slices of bytes, `bytes_vec_mut`
+    /// If the `BufMut` is backed by disjoint slices of bytes, `bytes_vectored_mut`
     /// enables fetching more than one slice at once. `dst` is a slice of
-    /// mutable `IoVec` references, enabling the slice to be directly used with
+    /// mutable `IoSliceMut` references, enabling the slice to be directly used with
     /// [`readv`] without any further conversion. The sum of the lengths of all
     /// the buffers in `dst` will be less than or equal to
     /// `Buf::remaining_mut()`.
     ///
     /// The entries in `dst` will be overwritten, but the data **contained** by
-    /// the slices **will not** be modified. If `bytes_vec_mut` does not fill every
+    /// the slices **will not** be modified. If `bytes_vectored_mut` does not fill every
     /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices
     /// in `self.
     ///
     /// This is a lower level function. Most operations are done with other
     /// functions.
     ///
     /// # Implementer notes
     ///
     /// This function should never panic. Once the end of the buffer is reached,
-    /// i.e., `BufMut::remaining_mut` returns 0, calls to `bytes_vec_mut` must
+    /// i.e., `BufMut::remaining_mut` returns 0, calls to `bytes_vectored_mut` must
     /// return 0 without mutating `dst`.
     ///
     /// Implementations should also take care to properly handle being called
     /// with `dst` being a zero length slice.
     ///
     /// [`readv`]: http://man7.org/linux/man-pages/man2/readv.2.html
-    unsafe fn bytes_vec_mut<'a>(&'a mut self, dst: &mut [&'a mut IoVec]) -> usize {
+    #[cfg(feature = "std")]
+    fn bytes_vectored_mut<'a>(&'a mut self, dst: &mut [IoSliceMut<'a>]) -> usize {
         if dst.is_empty() {
             return 0;
         }
 
         if self.has_remaining_mut() {
-            dst[0] = self.bytes_mut().into();
+            dst[0] = IoSliceMut::from(self.bytes_mut());
             1
         } else {
             0
         }
     }
 
     /// Transfer bytes into `self` from `src` and advance the cursor by the
     /// number of bytes written.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
     ///
-    /// buf.put(b'h');
+    /// buf.put_u8(b'h');
     /// buf.put(&b"ello"[..]);
-    /// buf.put(" world");
+    /// buf.put(&b" world"[..]);
     ///
     /// assert_eq!(buf, b"hello world");
     /// ```
     ///
     /// # Panics
     ///
     /// Panics if `self` does not have enough capacity to contain `src`.
-    fn put<T: IntoBuf>(&mut self, src: T) where Self: Sized {
-        use super::Buf;
-
-        let mut src = src.into_buf();
-
+    fn put<T: super::Buf>(&mut self, mut src: T) where Self: Sized {
         assert!(self.remaining_mut() >= src.remaining());
 
         while src.has_remaining() {
             let l;
 
             unsafe {
                 let s = src.bytes();
                 let d = self.bytes_mut();
                 l = cmp::min(s.len(), d.len());
 
                 ptr::copy_nonoverlapping(
                     s.as_ptr(),
-                    d.as_mut_ptr(),
+                    d.as_mut_ptr() as *mut u8,
                     l);
             }
 
             src.advance(l);
             unsafe { self.advance_mut(l); }
         }
     }
 
     /// Transfer bytes into `self` from `src` and advance the cursor by the
     /// number of bytes written.
     ///
     /// `self` must have enough remaining capacity to contain all of `src`.
     ///
     /// ```
     /// use bytes::BufMut;
-    /// use std::io::Cursor;
     ///
     /// let mut dst = [0; 6];
     ///
     /// {
-    ///     let mut buf = Cursor::new(&mut dst);
+    ///     let mut buf = &mut dst[..];
     ///     buf.put_slice(b"hello");
     ///
     ///     assert_eq!(1, buf.remaining_mut());
     /// }
     ///
     /// assert_eq!(b"hello\0", &dst);
     /// ```
     fn put_slice(&mut self, src: &[u8]) {
         let mut off = 0;
 
-        assert!(self.remaining_mut() >= src.len(), "buffer overflow");
+        assert!(self.remaining_mut() >= src.len(), "buffer overflow; remaining = {}; src = {}", self.remaining_mut(), src.len());
 
         while off < src.len() {
             let cnt;
 
             unsafe {
                 let dst = self.bytes_mut();
                 cnt = cmp::min(dst.len(), src.len() - off);
 
                 ptr::copy_nonoverlapping(
                     src[off..].as_ptr(),
-                    dst.as_mut_ptr(),
+                    dst.as_mut_ptr() as *mut u8,
                     cnt);
 
                 off += cnt;
 
             }
 
             unsafe { self.advance_mut(cnt); }
         }
@@ -333,46 +333,36 @@ pub trait BufMut {
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_i8(&mut self, n: i8) {
         let src = [n as u8];
         self.put_slice(&src)
     }
 
-    #[doc(hidden)]
-    #[deprecated(note="use put_u16_be or put_u16_le")]
-    fn put_u16<T: ByteOrder>(&mut self, n: u16) where Self: Sized {
-        let mut buf = [0; 2];
-        T::write_u16(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
     /// Writes an unsigned 16 bit integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by 2.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_u16_be(0x0809);
+    /// buf.put_u16(0x0809);
     /// assert_eq!(buf, b"\x08\x09");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_u16_be(&mut self, n: u16) {
-        let mut buf = [0; 2];
-        BigEndian::write_u16(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_u16(&mut self, n: u16) {
+        self.put_slice(&n.to_be_bytes())
     }
 
     /// Writes an unsigned 16 bit integer to `self` in little-endian byte order.
     ///
     /// The current position is advanced by 2.
     ///
     /// # Examples
     ///
@@ -384,51 +374,39 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\x09\x08");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_u16_le(&mut self, n: u16) {
-        let mut buf = [0; 2];
-        LittleEndian::write_u16(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_i16_be or put_i16_le")]
-    fn put_i16<T: ByteOrder>(&mut self, n: i16) where Self: Sized {
-        let mut buf = [0; 2];
-        T::write_i16(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_slice(&n.to_le_bytes())
     }
 
     /// Writes a signed 16 bit integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by 2.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_i16_be(0x0809);
+    /// buf.put_i16(0x0809);
     /// assert_eq!(buf, b"\x08\x09");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_i16_be(&mut self, n: i16) {
-        let mut buf = [0; 2];
-        BigEndian::write_i16(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_i16(&mut self, n: i16) {
+        self.put_slice(&n.to_be_bytes())
     }
 
     /// Writes a signed 16 bit integer to `self` in little-endian byte order.
     ///
     /// The current position is advanced by 2.
     ///
     /// # Examples
     ///
@@ -440,51 +418,39 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\x09\x08");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_i16_le(&mut self, n: i16) {
-        let mut buf = [0; 2];
-        LittleEndian::write_i16(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_u32_be or put_u32_le")]
-    fn put_u32<T: ByteOrder>(&mut self, n: u32) where Self: Sized {
-        let mut buf = [0; 4];
-        T::write_u32(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_slice(&n.to_le_bytes())
     }
 
     /// Writes an unsigned 32 bit integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_u32_be(0x0809A0A1);
+    /// buf.put_u32(0x0809A0A1);
     /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_u32_be(&mut self, n: u32) {
-        let mut buf = [0; 4];
-        BigEndian::write_u32(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_u32(&mut self, n: u32) {
+        self.put_slice(&n.to_be_bytes())
     }
 
     /// Writes an unsigned 32 bit integer to `self` in little-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
@@ -496,51 +462,39 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_u32_le(&mut self, n: u32) {
-        let mut buf = [0; 4];
-        LittleEndian::write_u32(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_i32_be or put_i32_le")]
-    fn put_i32<T: ByteOrder>(&mut self, n: i32) where Self: Sized {
-        let mut buf = [0; 4];
-        T::write_i32(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_slice(&n.to_le_bytes())
     }
 
     /// Writes a signed 32 bit integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_i32_be(0x0809A0A1);
+    /// buf.put_i32(0x0809A0A1);
     /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_i32_be(&mut self, n: i32) {
-        let mut buf = [0; 4];
-        BigEndian::write_i32(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_i32(&mut self, n: i32) {
+        self.put_slice(&n.to_be_bytes())
     }
 
     /// Writes a signed 32 bit integer to `self` in little-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
@@ -552,51 +506,39 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_i32_le(&mut self, n: i32) {
-        let mut buf = [0; 4];
-        LittleEndian::write_i32(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_u64_be or put_u64_le")]
-    fn put_u64<T: ByteOrder>(&mut self, n: u64) where Self: Sized {
-        let mut buf = [0; 8];
-        T::write_u64(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_slice(&n.to_le_bytes())
     }
 
     /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_u64_be(0x0102030405060708);
+    /// buf.put_u64(0x0102030405060708);
     /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_u64_be(&mut self, n: u64) {
-        let mut buf = [0; 8];
-        BigEndian::write_u64(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_u64(&mut self, n: u64) {
+        self.put_slice(&n.to_be_bytes())
     }
 
     /// Writes an unsigned 64 bit integer to `self` in little-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
@@ -608,51 +550,39 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_u64_le(&mut self, n: u64) {
-        let mut buf = [0; 8];
-        LittleEndian::write_u64(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_i64_be or put_i64_le")]
-    fn put_i64<T: ByteOrder>(&mut self, n: i64) where Self: Sized {
-        let mut buf = [0; 8];
-        T::write_i64(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_slice(&n.to_le_bytes())
     }
 
     /// Writes a signed 64 bit integer to `self` in the big-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_i64_be(0x0102030405060708);
+    /// buf.put_i64(0x0102030405060708);
     /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_i64_be(&mut self, n: i64) {
-        let mut buf = [0; 8];
-        BigEndian::write_i64(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_i64(&mut self, n: i64) {
+        self.put_slice(&n.to_be_bytes())
     }
 
     /// Writes a signed 64 bit integer to `self` in little-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
@@ -664,155 +594,127 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_i64_le(&mut self, n: i64) {
-        let mut buf = [0; 8];
-        LittleEndian::write_i64(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_slice(&n.to_le_bytes())
     }
 
     /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order.
     ///
-    /// **NOTE:** This method requires the `i128` feature.
     /// The current position is advanced by 16.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_u128_be(0x01020304050607080910111213141516);
+    /// buf.put_u128(0x01020304050607080910111213141516);
     /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    #[cfg(feature = "i128")]
-    fn put_u128_be(&mut self, n: u128) {
-        let mut buf = [0; 16];
-        BigEndian::write_u128(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_u128(&mut self, n: u128) {
+        self.put_slice(&n.to_be_bytes())
     }
 
     /// Writes an unsigned 128 bit integer to `self` in little-endian byte order.
     ///
-    /// **NOTE:** This method requires the `i128` feature.
     /// The current position is advanced by 16.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
     /// buf.put_u128_le(0x01020304050607080910111213141516);
     /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    #[cfg(feature = "i128")]
     fn put_u128_le(&mut self, n: u128) {
-        let mut buf = [0; 16];
-        LittleEndian::write_u128(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_slice(&n.to_le_bytes())
     }
 
     /// Writes a signed 128 bit integer to `self` in the big-endian byte order.
     ///
-    /// **NOTE:** This method requires the `i128` feature.
     /// The current position is advanced by 16.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_i128_be(0x01020304050607080910111213141516);
+    /// buf.put_i128(0x01020304050607080910111213141516);
     /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    #[cfg(feature = "i128")]
-    fn put_i128_be(&mut self, n: i128) {
-        let mut buf = [0; 16];
-        BigEndian::write_i128(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_i128(&mut self, n: i128) {
+        self.put_slice(&n.to_be_bytes())
     }
 
     /// Writes a signed 128 bit integer to `self` in little-endian byte order.
     ///
-    /// **NOTE:** This method requires the `i128` feature.
     /// The current position is advanced by 16.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
     /// buf.put_i128_le(0x01020304050607080910111213141516);
     /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    #[cfg(feature = "i128")]
     fn put_i128_le(&mut self, n: i128) {
-        let mut buf = [0; 16];
-        LittleEndian::write_i128(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_uint_be or put_uint_le")]
-    fn put_uint<T: ByteOrder>(&mut self, n: u64, nbytes: usize) where Self: Sized {
-        let mut buf = [0; 8];
-        T::write_uint(&mut buf, n, nbytes);
-        self.put_slice(&buf[0..nbytes])
+        self.put_slice(&n.to_le_bytes())
     }
 
     /// Writes an unsigned n-byte integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_uint_be(0x010203, 3);
+    /// buf.put_uint(0x010203, 3);
     /// assert_eq!(buf, b"\x01\x02\x03");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_uint_be(&mut self, n: u64, nbytes: usize) {
-        let mut buf = [0; 8];
-        BigEndian::write_uint(&mut buf, n, nbytes);
-        self.put_slice(&buf[0..nbytes])
+    fn put_uint(&mut self, n: u64, nbytes: usize) {
+        self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]);
     }
 
     /// Writes an unsigned n-byte integer to `self` in the little-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
     ///
     /// # Examples
     ///
@@ -824,51 +726,39 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\x03\x02\x01");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_uint_le(&mut self, n: u64, nbytes: usize) {
-        let mut buf = [0; 8];
-        LittleEndian::write_uint(&mut buf, n, nbytes);
-        self.put_slice(&buf[0..nbytes])
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_int_be or put_int_le")]
-    fn put_int<T: ByteOrder>(&mut self, n: i64, nbytes: usize) where Self: Sized {
-        let mut buf = [0; 8];
-        T::write_int(&mut buf, n, nbytes);
-        self.put_slice(&buf[0..nbytes])
+        self.put_slice(&n.to_le_bytes()[0..nbytes]);
     }
 
     /// Writes a signed n-byte integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_int_be(0x010203, 3);
+    /// buf.put_int(0x010203, 3);
     /// assert_eq!(buf, b"\x01\x02\x03");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_int_be(&mut self, n: i64, nbytes: usize) {
-        let mut buf = [0; 8];
-        BigEndian::write_int(&mut buf, n, nbytes);
-        self.put_slice(&buf[0..nbytes])
+    fn put_int(&mut self, n: i64, nbytes: usize) {
+        self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]);
     }
 
     /// Writes a signed n-byte integer to `self` in little-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
     ///
     /// # Examples
     ///
@@ -880,52 +770,40 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\x03\x02\x01");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_int_le(&mut self, n: i64, nbytes: usize) {
-        let mut buf = [0; 8];
-        LittleEndian::write_int(&mut buf, n, nbytes);
-        self.put_slice(&buf[0..nbytes])
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_f32_be or put_f32_le")]
-    fn put_f32<T: ByteOrder>(&mut self, n: f32) where Self: Sized {
-        let mut buf = [0; 4];
-        T::write_f32(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_slice(&n.to_le_bytes()[0..nbytes]);
     }
 
     /// Writes  an IEEE754 single-precision (4 bytes) floating point number to
     /// `self` in big-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_f32_be(1.2f32);
+    /// buf.put_f32(1.2f32);
     /// assert_eq!(buf, b"\x3F\x99\x99\x9A");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_f32_be(&mut self, n: f32) {
-        let mut buf = [0; 4];
-        BigEndian::write_f32(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_f32(&mut self, n: f32) {
+        self.put_u32(n.to_bits());
     }
 
     /// Writes  an IEEE754 single-precision (4 bytes) floating point number to
     /// `self` in little-endian byte order.
     ///
     /// The current position is advanced by 4.
     ///
     /// # Examples
@@ -938,52 +816,40 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\x9A\x99\x99\x3F");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_f32_le(&mut self, n: f32) {
-        let mut buf = [0; 4];
-        LittleEndian::write_f32(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
-    #[doc(hidden)]
-    #[deprecated(note="use put_f64_be or put_f64_le")]
-    fn put_f64<T: ByteOrder>(&mut self, n: f64) where Self: Sized {
-        let mut buf = [0; 8];
-        T::write_f64(&mut buf, n);
-        self.put_slice(&buf)
+        self.put_u32_le(n.to_bits());
     }
 
     /// Writes  an IEEE754 double-precision (8 bytes) floating point number to
     /// `self` in big-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::BufMut;
     ///
     /// let mut buf = vec![];
-    /// buf.put_f64_be(1.2f64);
+    /// buf.put_f64(1.2f64);
     /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
-    fn put_f64_be(&mut self, n: f64) {
-        let mut buf = [0; 8];
-        BigEndian::write_f64(&mut buf, n);
-        self.put_slice(&buf)
+    fn put_f64(&mut self, n: f64) {
+        self.put_u64(n.to_bits());
     }
 
     /// Writes  an IEEE754 double-precision (8 bytes) floating point number to
     /// `self` in little-endian byte order.
     ///
     /// The current position is advanced by 8.
     ///
     /// # Examples
@@ -996,138 +862,126 @@ pub trait BufMut {
     /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
     /// ```
     ///
     /// # Panics
     ///
     /// This function panics if there is not enough remaining capacity in
     /// `self`.
     fn put_f64_le(&mut self, n: f64) {
-        let mut buf = [0; 8];
-        LittleEndian::write_f64(&mut buf, n);
-        self.put_slice(&buf)
-    }
-
-    /// Creates a "by reference" adaptor for this instance of `BufMut`.
-    ///
-    /// The returned adapter also implements `BufMut` and will simply borrow
-    /// `self`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BufMut;
-    /// use std::io;
-    ///
-    /// let mut buf = vec![];
-    ///
-    /// {
-    ///     let mut reference = buf.by_ref();
-    ///
-    ///     // Adapt reference to `std::io::Write`.
-    ///     let mut writer = reference.writer();
-    ///
-    ///     // Use the buffer as a writter
-    ///     io::Write::write(&mut writer, &b"hello world"[..]).unwrap();
-    /// } // drop our &mut reference so that we can use `buf` again
-    ///
-    /// assert_eq!(buf, &b"hello world"[..]);
-    /// ```
-    fn by_ref(&mut self) -> &mut Self where Self: Sized {
-        self
-    }
-
-    /// Creates an adaptor which implements the `Write` trait for `self`.
-    ///
-    /// This function returns a new value which implements `Write` by adapting
-    /// the `Write` trait functions to the `BufMut` trait functions. Given that
-    /// `BufMut` operations are infallible, none of the `Write` functions will
-    /// return with `Err`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BufMut;
-    /// use std::io::Write;
-    ///
-    /// let mut buf = vec![].writer();
-    ///
-    /// let num = buf.write(&b"hello world"[..]).unwrap();
-    /// assert_eq!(11, num);
-    ///
-    /// let buf = buf.into_inner();
-    ///
-    /// assert_eq!(*buf, b"hello world"[..]);
-    /// ```
-    fn writer(self) -> Writer<Self> where Self: Sized {
-        super::writer::new(self)
+        self.put_u64_le(n.to_bits());
     }
 }
 
-impl<'a, T: BufMut + ?Sized> BufMut for &'a mut T {
+macro_rules! deref_forward_bufmut {
+    () => (
     fn remaining_mut(&self) -> usize {
         (**self).remaining_mut()
     }
 
-    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
+    fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
         (**self).bytes_mut()
     }
 
-    unsafe fn bytes_vec_mut<'b>(&'b mut self, dst: &mut [&'b mut IoVec]) -> usize {
-        (**self).bytes_vec_mut(dst)
+    #[cfg(feature = "std")]
+    fn bytes_vectored_mut<'b>(&'b mut self, dst: &mut [IoSliceMut<'b>]) -> usize {
+        (**self).bytes_vectored_mut(dst)
     }
 
     unsafe fn advance_mut(&mut self, cnt: usize) {
         (**self).advance_mut(cnt)
     }
+
+    fn put_slice(&mut self, src: &[u8]) {
+        (**self).put_slice(src)
+    }
+
+    fn put_u8(&mut self, n: u8) {
+        (**self).put_u8(n)
+    }
+
+    fn put_i8(&mut self, n: i8) {
+        (**self).put_i8(n)
+    }
+
+    fn put_u16(&mut self, n: u16) {
+        (**self).put_u16(n)
+    }
+
+    fn put_u16_le(&mut self, n: u16) {
+        (**self).put_u16_le(n)
+    }
+
+    fn put_i16(&mut self, n: i16) {
+        (**self).put_i16(n)
+    }
+
+    fn put_i16_le(&mut self, n: i16) {
+        (**self).put_i16_le(n)
+    }
+
+    fn put_u32(&mut self, n: u32) {
+        (**self).put_u32(n)
+    }
+
+    fn put_u32_le(&mut self, n: u32) {
+        (**self).put_u32_le(n)
+    }
+
+    fn put_i32(&mut self, n: i32) {
+        (**self).put_i32(n)
+    }
+
+    fn put_i32_le(&mut self, n: i32) {
+        (**self).put_i32_le(n)
+    }
+
+    fn put_u64(&mut self, n: u64) {
+        (**self).put_u64(n)
+    }
+
+    fn put_u64_le(&mut self, n: u64) {
+        (**self).put_u64_le(n)
+    }
+
+    fn put_i64(&mut self, n: i64) {
+        (**self).put_i64(n)
+    }
+
+    fn put_i64_le(&mut self, n: i64) {
+        (**self).put_i64_le(n)
+    }
+    )
+}
+
+impl<T: BufMut + ?Sized> BufMut for &mut T {
+    deref_forward_bufmut!();
 }
 
 impl<T: BufMut + ?Sized> BufMut for Box<T> {
-    fn remaining_mut(&self) -> usize {
-        (**self).remaining_mut()
-    }
-
-    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
-        (**self).bytes_mut()
-    }
-
-    unsafe fn bytes_vec_mut<'b>(&'b mut self, dst: &mut [&'b mut IoVec]) -> usize {
-        (**self).bytes_vec_mut(dst)
-    }
-
-    unsafe fn advance_mut(&mut self, cnt: usize) {
-        (**self).advance_mut(cnt)
-    }
+    deref_forward_bufmut!();
 }
 
-impl<T: AsMut<[u8]> + AsRef<[u8]>> BufMut for io::Cursor<T> {
+impl BufMut for &mut [u8] {
+    #[inline]
     fn remaining_mut(&self) -> usize {
-        use Buf;
-        self.remaining()
-    }
-
-    /// Advance the internal cursor of the BufMut
-    unsafe fn advance_mut(&mut self, cnt: usize) {
-        use Buf;
-        self.advance(cnt);
+        self.len()
     }
 
-    /// Returns a mutable slice starting at the current BufMut position and of
-    /// length between 0 and `BufMut::remaining()`.
-    ///
-    /// The returned byte slice may represent uninitialized memory.
-    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
-        let len = self.get_ref().as_ref().len();
-        let pos = self.position() as usize;
+    #[inline]
+    fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+        // MaybeUninit is repr(transparent), so safe to transmute
+        unsafe { mem::transmute(&mut **self) }
+    }
 
-        if pos >= len {
-            return Default::default();
-        }
-
-        &mut (self.get_mut().as_mut())[pos..]
+    #[inline]
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        // Lifetime dance taken from `impl Write for &mut [u8]`.
+        let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt);
+        *self = b;
     }
 }
 
 impl BufMut for Vec<u8> {
     #[inline]
     fn remaining_mut(&self) -> usize {
         usize::MAX - self.len()
     }
@@ -1141,26 +995,94 @@ impl BufMut for Vec<u8> {
             // will not overflow usize.
             self.reserve(cnt);
         }
 
         self.set_len(len + cnt);
     }
 
     #[inline]
-    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
-        use std::slice;
+    fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+        use core::slice;
 
         if self.capacity() == self.len() {
             self.reserve(64); // Grow the vec
         }
 
         let cap = self.capacity();
         let len = self.len();
 
-        let ptr = self.as_mut_ptr();
-        &mut slice::from_raw_parts_mut(ptr, cap)[len..]
+        let ptr = self.as_mut_ptr() as *mut MaybeUninit<u8>;
+        unsafe {
+            &mut slice::from_raw_parts_mut(ptr, cap)[len..]
+        }
+    }
+
+    // Specialize these methods so they can skip checking `remaining_mut`
+    // and `advance_mut`.
+
+    fn put<T: super::Buf>(&mut self, mut src: T) where Self: Sized {
+        // In case the src isn't contiguous, reserve upfront
+        self.reserve(src.remaining());
+
+        while src.has_remaining() {
+            let l;
+
+            // a block to contain the src.bytes() borrow
+            {
+                let s = src.bytes();
+                l = s.len();
+                self.extend_from_slice(s);
+            }
+
+            src.advance(l);
+        }
+    }
+
+    fn put_slice(&mut self, src: &[u8]) {
+        self.extend_from_slice(src);
     }
 }
 
-// The existance of this function makes the compiler catch if the BufMut
+// The existence of this function makes the compiler catch if the BufMut
 // trait is "object-safe" or not.
-fn _assert_trait_object(_b: &BufMut) {}
+fn _assert_trait_object(_b: &dyn BufMut) {}
+
+// ===== impl IoSliceMut =====
+
+/// A buffer type used for `readv`.
+///
+/// This is a wrapper around an `std::io::IoSliceMut`, but does not expose
+/// the inner bytes in a safe API, as they may point at uninitialized memory.
+///
+/// This is `repr(transparent)` of the `std::io::IoSliceMut`, so it is valid to
+/// transmute them. However, as the memory might be uninitialized, care must be
+/// taken to not *read* the internal bytes, only *write* to them.
+#[repr(transparent)]
+#[cfg(feature = "std")]
+pub struct IoSliceMut<'a>(std::io::IoSliceMut<'a>);
+
+#[cfg(feature = "std")]
+impl fmt::Debug for IoSliceMut<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("IoSliceMut")
+            .field("len", &self.0.len())
+            .finish()
+    }
+}
+
+#[cfg(feature = "std")]
+impl<'a> From<&'a mut [u8]> for IoSliceMut<'a> {
+    fn from(buf: &'a mut [u8]) -> IoSliceMut<'a> {
+        IoSliceMut(std::io::IoSliceMut::new(buf))
+    }
+}
+
+#[cfg(feature = "std")]
+impl<'a> From<&'a mut [MaybeUninit<u8>]> for IoSliceMut<'a> {
+    fn from(buf: &'a mut [MaybeUninit<u8>]) -> IoSliceMut<'a> {
+        IoSliceMut(std::io::IoSliceMut::new(unsafe {
+            // We don't look at the contents, and `std::io::IoSliceMut`
+            // doesn't either.
+            mem::transmute::<&'a mut [MaybeUninit<u8>], &'a mut [u8]>(buf)
+        }))
+    }
+}
copy from third_party/rust/bytes/src/buf/chain.rs
copy to third_party/rust/bytes/src/buf/ext/chain.rs
--- a/third_party/rust/bytes/src/buf/chain.rs
+++ b/third_party/rust/bytes/src/buf/ext/chain.rs
@@ -1,147 +1,140 @@
-use {Buf, BufMut};
-use iovec::IoVec;
+use crate::{Buf, BufMut};
+use crate::buf::IntoIter;
+
+use core::mem::MaybeUninit;
+
+#[cfg(feature = "std")]
+use std::io::{IoSlice};
+#[cfg(feature = "std")]
+use crate::buf::IoSliceMut;
 
 /// A `Chain` sequences two buffers.
 ///
 /// `Chain` is an adapter that links two underlying buffers and provides a
-/// continous view across both buffers. It is able to sequence either immutable
+/// continuous view across both buffers. It is able to sequence either immutable
 /// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values).
 ///
 /// This struct is generally created by calling [`Buf::chain`]. Please see that
 /// function's documentation for more detail.
 ///
 /// # Examples
 ///
 /// ```
-/// use bytes::{Bytes, Buf, IntoBuf};
-/// use bytes::buf::Chain;
+/// use bytes::{Bytes, Buf, buf::BufExt};
 ///
-/// let buf = Bytes::from(&b"hello "[..]).into_buf()
-///             .chain(Bytes::from(&b"world"[..]));
+/// let mut buf = (&b"hello "[..])
+///     .chain(&b"world"[..]);
 ///
-/// let full: Bytes = buf.collect();
+/// let full: Bytes = buf.to_bytes();
 /// assert_eq!(full[..], b"hello world"[..]);
 /// ```
 ///
 /// [`Buf::chain`]: trait.Buf.html#method.chain
 /// [`Buf`]: trait.Buf.html
 /// [`BufMut`]: trait.BufMut.html
 #[derive(Debug)]
 pub struct Chain<T, U> {
     a: T,
     b: U,
 }
 
 impl<T, U> Chain<T, U> {
     /// Creates a new `Chain` sequencing the provided values.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    /// use bytes::buf::Chain;
-    ///
-    /// let buf = Chain::new(
-    ///     BytesMut::with_capacity(1024),
-    ///     BytesMut::with_capacity(1024));
-    ///
-    /// // Use the chained buffer
-    /// ```
     pub fn new(a: T, b: U) -> Chain<T, U> {
         Chain {
-            a: a,
-            b: b,
+            a,
+            b,
         }
     }
 
     /// Gets a reference to the first underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```
-    /// use bytes::{Bytes, Buf, IntoBuf};
+    /// use bytes::buf::BufExt;
     ///
-    /// let buf = Bytes::from(&b"hello"[..]).into_buf()
-    ///             .chain(Bytes::from(&b"world"[..]));
+    /// let buf = (&b"hello"[..])
+    ///     .chain(&b"world"[..]);
     ///
-    /// assert_eq!(buf.first_ref().get_ref()[..], b"hello"[..]);
+    /// assert_eq!(buf.first_ref()[..], b"hello"[..]);
     /// ```
     pub fn first_ref(&self) -> &T {
         &self.a
     }
 
     /// Gets a mutable reference to the first underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```
-    /// use bytes::{Bytes, Buf, IntoBuf};
+    /// use bytes::{Buf, buf::BufExt};
     ///
-    /// let mut buf = Bytes::from(&b"hello "[..]).into_buf()
-    ///                 .chain(Bytes::from(&b"world"[..]));
+    /// let mut buf = (&b"hello"[..])
+    ///     .chain(&b"world"[..]);
     ///
-    /// buf.first_mut().set_position(1);
+    /// buf.first_mut().advance(1);
     ///
-    /// let full: Bytes = buf.collect();
-    /// assert_eq!(full[..], b"ello world"[..]);
+    /// let full = buf.to_bytes();
+    /// assert_eq!(full, b"elloworld"[..]);
     /// ```
     pub fn first_mut(&mut self) -> &mut T {
         &mut self.a
     }
 
     /// Gets a reference to the last underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```
-    /// use bytes::{Bytes, Buf, IntoBuf};
+    /// use bytes::buf::BufExt;
     ///
-    /// let buf = Bytes::from(&b"hello"[..]).into_buf()
-    ///             .chain(Bytes::from(&b"world"[..]));
+    /// let buf = (&b"hello"[..])
+    ///     .chain(&b"world"[..]);
     ///
-    /// assert_eq!(buf.last_ref().get_ref()[..], b"world"[..]);
+    /// assert_eq!(buf.last_ref()[..], b"world"[..]);
     /// ```
     pub fn last_ref(&self) -> &U {
         &self.b
     }
 
     /// Gets a mutable reference to the last underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```
-    /// use bytes::{Bytes, Buf, IntoBuf};
+    /// use bytes::{Buf, buf::BufExt};
     ///
-    /// let mut buf = Bytes::from(&b"hello "[..]).into_buf()
-    ///                 .chain(Bytes::from(&b"world"[..]));
+    /// let mut buf = (&b"hello "[..])
+    ///     .chain(&b"world"[..]);
     ///
-    /// buf.last_mut().set_position(1);
+    /// buf.last_mut().advance(1);
     ///
-    /// let full: Bytes = buf.collect();
-    /// assert_eq!(full[..], b"hello orld"[..]);
+    /// let full = buf.to_bytes();
+    /// assert_eq!(full, b"hello orld"[..]);
     /// ```
     pub fn last_mut(&mut self) -> &mut U {
         &mut self.b
     }
 
     /// Consumes this `Chain`, returning the underlying values.
     ///
     /// # Examples
     ///
     /// ```
-    /// use bytes::{Bytes, Buf, IntoBuf};
+    /// use bytes::buf::BufExt;
     ///
-    /// let buf = Bytes::from(&b"hello"[..]).into_buf()
-    ///             .chain(Bytes::from(&b"world"[..]));
+    /// let chain = (&b"hello"[..])
+    ///     .chain(&b"world"[..]);
     ///
-    /// let (first, last) = buf.into_inner();
-    /// assert_eq!(first.get_ref()[..], b"hello"[..]);
-    /// assert_eq!(last.get_ref()[..], b"world"[..]);
+    /// let (first, last) = chain.into_inner();
+    /// assert_eq!(first[..], b"hello"[..]);
+    /// assert_eq!(last[..], b"world"[..]);
     /// ```
     pub fn into_inner(self) -> (T, U) {
         (self.a, self.b)
     }
 }
 
 impl<T, U> Buf for Chain<T, U>
     where T: Buf,
@@ -172,32 +165,33 @@ impl<T, U> Buf for Chain<T, U>
             self.a.advance(a_rem);
 
             cnt -= a_rem;
         }
 
         self.b.advance(cnt);
     }
 
-    fn bytes_vec<'a>(&'a self, dst: &mut [&'a IoVec]) -> usize {
-        let mut n = self.a.bytes_vec(dst);
-        n += self.b.bytes_vec(&mut dst[n..]);
+    #[cfg(feature = "std")]
+    fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
+        let mut n = self.a.bytes_vectored(dst);
+        n += self.b.bytes_vectored(&mut dst[n..]);
         n
     }
 }
 
 impl<T, U> BufMut for Chain<T, U>
     where T: BufMut,
           U: BufMut,
 {
     fn remaining_mut(&self) -> usize {
         self.a.remaining_mut() + self.b.remaining_mut()
     }
 
-    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
+    fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
         if self.a.has_remaining_mut() {
             self.a.bytes_mut()
         } else {
             self.b.bytes_mut()
         }
     }
 
     unsafe fn advance_mut(&mut self, mut cnt: usize) {
@@ -213,14 +207,28 @@ impl<T, U> BufMut for Chain<T, U>
             self.a.advance_mut(a_rem);
 
             cnt -= a_rem;
         }
 
         self.b.advance_mut(cnt);
     }
 
-    unsafe fn bytes_vec_mut<'a>(&'a mut self, dst: &mut [&'a mut IoVec]) -> usize {
-        let mut n = self.a.bytes_vec_mut(dst);
-        n += self.b.bytes_vec_mut(&mut dst[n..]);
+    #[cfg(feature = "std")]
+    fn bytes_vectored_mut<'a>(&'a mut self, dst: &mut [IoSliceMut<'a>]) -> usize {
+        let mut n = self.a.bytes_vectored_mut(dst);
+        n += self.b.bytes_vectored_mut(&mut dst[n..]);
         n
     }
 }
+
+impl<T, U> IntoIterator for Chain<T, U>
+where
+    T: Buf,
+    U: Buf,
+{
+    type Item = u8;
+    type IntoIter = IntoIter<Chain<T, U>>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter::new(self)
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/src/buf/ext/limit.rs
@@ -0,0 +1,77 @@
+use crate::BufMut;
+
+use core::{cmp, mem::MaybeUninit};
+
+/// A `BufMut` adapter which limits the amount of bytes that can be written
+/// to an underlying buffer.
+#[derive(Debug)]
+pub struct Limit<T> {
+    inner: T,
+    limit: usize,
+}
+
+pub(super) fn new<T>(inner: T, limit: usize) -> Limit<T> {
+    Limit {
+        inner,
+        limit,
+    }
+}
+
+impl<T> Limit<T> {
+    /// Consumes this `Limit`, returning the underlying value.
+    pub fn into_inner(self) -> T {
+        self.inner
+    }
+
+    /// Gets a reference to the underlying `BufMut`.
+    ///
+    /// It is inadvisable to directly write to the underlying `BufMut`.
+    pub fn get_ref(&self) -> &T {
+        &self.inner
+    }
+
+    /// Gets a mutable reference to the underlying `BufMut`.
+    ///
+    /// It is inadvisable to directly write to the underlying `BufMut`.
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.inner
+    }
+
+    /// Returns the maximum number of bytes that can be written
+    ///
+    /// # Note
+    ///
+    /// If the inner `BufMut` has fewer bytes than indicated by this method then
+    /// that is the actual number of available bytes.
+    pub fn limit(&self) -> usize {
+        self.limit
+    }
+
+    /// Sets the maximum number of bytes that can be written.
+    ///
+    /// # Note
+    ///
+    /// If the inner `BufMut` has fewer bytes than `lim` then that is the actual
+    /// number of available bytes.
+    pub fn set_limit(&mut self, lim: usize) {
+        self.limit = lim
+    }
+}
+
+impl<T: BufMut> BufMut for Limit<T> {
+    fn remaining_mut(&self) -> usize {
+        cmp::min(self.inner.remaining_mut(), self.limit)
+    }
+
+    fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+        let bytes = self.inner.bytes_mut();
+        let end = cmp::min(bytes.len(), self.limit);
+        &mut bytes[..end]
+    }
+
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        assert!(cnt <= self.limit);
+        self.inner.advance_mut(cnt);
+        self.limit -= cnt;
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/src/buf/ext/mod.rs
@@ -0,0 +1,176 @@
+//! Extra utilities for `Buf` and `BufMut` types.
+
+use super::{Buf, BufMut};
+
+mod chain;
+mod limit;
+#[cfg(feature = "std")]
+mod reader;
+mod take;
+#[cfg(feature = "std")]
+mod writer;
+
+pub use self::limit::Limit;
+pub use self::take::Take;
+pub use self::chain::Chain;
+
+#[cfg(feature = "std")]
+pub use self::{reader::Reader, writer::Writer};
+
+/// Extra methods for implementations of `Buf`.
+pub trait BufExt: Buf {
+    /// Creates an adaptor which will read at most `limit` bytes from `self`.
+    ///
+    /// This function returns a new instance of `Buf` which will read at most
+    /// `limit` bytes.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Buf, BufMut, buf::BufExt};
+    ///
+    /// let mut buf = b"hello world"[..].take(5);
+    /// let mut dst = vec![];
+    ///
+    /// dst.put(&mut buf);
+    /// assert_eq!(dst, b"hello");
+    ///
+    /// let mut buf = buf.into_inner();
+    /// dst.clear();
+    /// dst.put(&mut buf);
+    /// assert_eq!(dst, b" world");
+    /// ```
+    fn take(self, limit: usize) -> Take<Self>
+        where Self: Sized
+    {
+        take::new(self, limit)
+    }
+
+    /// Creates an adaptor which will chain this buffer with another.
+    ///
+    /// The returned `Buf` instance will first consume all bytes from `self`.
+    /// Afterwards the output is equivalent to the output of next.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Buf, buf::BufExt};
+    ///
+    /// let mut chain = b"hello "[..].chain(&b"world"[..]);
+    ///
+    /// let full = chain.to_bytes();
+    /// assert_eq!(full.bytes(), b"hello world");
+    /// ```
+    fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
+        where Self: Sized
+    {
+        Chain::new(self, next)
+    }
+
+    /// Creates an adaptor which implements the `Read` trait for `self`.
+    ///
+    /// This function returns a new value which implements `Read` by adapting
+    /// the `Read` trait functions to the `Buf` trait functions. Given that
+    /// `Buf` operations are infallible, none of the `Read` functions will
+    /// return with `Err`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Buf, Bytes, buf::BufExt};
+    /// use std::io::Read;
+    ///
+    /// let buf = Bytes::from("hello world");
+    ///
+    /// let mut reader = buf.reader();
+    /// let mut dst = [0; 1024];
+    ///
+    /// let num = reader.read(&mut dst).unwrap();
+    ///
+    /// assert_eq!(11, num);
+    /// assert_eq!(&dst[..11], &b"hello world"[..]);
+    /// ```
+    #[cfg(feature = "std")]
+    fn reader(self) -> Reader<Self> where Self: Sized {
+        reader::new(self)
+    }
+}
+
+impl<B: Buf + ?Sized> BufExt for B {}
+
+/// Extra methods for implementations of `BufMut`.
+pub trait BufMutExt: BufMut {
+    /// Creates an adaptor which can write at most `limit` bytes to `self`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BufMut, buf::BufMutExt};
+    ///
+    /// let arr = &mut [0u8; 128][..];
+    /// assert_eq!(arr.remaining_mut(), 128);
+    ///
+    /// let dst = arr.limit(10);
+    /// assert_eq!(dst.remaining_mut(), 10);
+    /// ```
+    fn limit(self, limit: usize) -> Limit<Self>
+        where Self: Sized
+    {
+        limit::new(self, limit)
+    }
+
+    /// Creates an adaptor which implements the `Write` trait for `self`.
+    ///
+    /// This function returns a new value which implements `Write` by adapting
+    /// the `Write` trait functions to the `BufMut` trait functions. Given that
+    /// `BufMut` operations are infallible, none of the `Write` functions will
+    /// return with `Err`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BufMut, buf::BufMutExt};
+    /// use std::io::Write;
+    ///
+    /// let mut buf = vec![].writer();
+    ///
+    /// let num = buf.write(&b"hello world"[..]).unwrap();
+    /// assert_eq!(11, num);
+    ///
+    /// let buf = buf.into_inner();
+    ///
+    /// assert_eq!(*buf, b"hello world"[..]);
+    /// ```
+    #[cfg(feature = "std")]
+    fn writer(self) -> Writer<Self> where Self: Sized {
+        writer::new(self)
+    }
+
+    /// Creates an adapter which will chain this buffer with another.
+    ///
+    /// The returned `BufMut` instance will first write to all bytes from
+    /// `self`. Afterwards, it will write to `next`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BufMut, buf::BufMutExt};
+    ///
+    /// let mut a = [0u8; 5];
+    /// let mut b = [0u8; 6];
+    ///
+    /// let mut chain = (&mut a[..]).chain_mut(&mut b[..]);
+    ///
+    /// chain.put_slice(b"hello world");
+    ///
+    /// assert_eq!(&a[..], b"hello");
+    /// assert_eq!(&b[..], b" world");
+    /// ```
+    fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
+        where Self: Sized
+    {
+        Chain::new(self, next)
+    }
+}
+
+impl<B: BufMut + ?Sized> BufMutExt for B {}
copy from third_party/rust/bytes/src/buf/reader.rs
copy to third_party/rust/bytes/src/buf/ext/reader.rs
--- a/third_party/rust/bytes/src/buf/reader.rs
+++ b/third_party/rust/bytes/src/buf/ext/reader.rs
@@ -1,76 +1,60 @@
-use {Buf};
+use crate::{Buf};
 
 use std::{cmp, io};
 
 /// A `Buf` adapter which implements `io::Read` for the inner value.
 ///
 /// This struct is generally created by calling `reader()` on `Buf`. See
 /// documentation of [`reader()`](trait.Buf.html#method.reader) for more
 /// details.
 #[derive(Debug)]
 pub struct Reader<B> {
     buf: B,
 }
 
 pub fn new<B>(buf: B) -> Reader<B> {
-    Reader { buf: buf }
+    Reader { buf }
 }
 
 impl<B: Buf> Reader<B> {
     /// Gets a reference to the underlying `Buf`.
     ///
     /// It is inadvisable to directly read from the underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::Buf;
-    /// use std::io::{self, Cursor};
+    /// use bytes::buf::BufExt;
     ///
-    /// let mut buf = Cursor::new(b"hello world").reader();
+    /// let mut buf = b"hello world".reader();
     ///
-    /// assert_eq!(0, buf.get_ref().position());
+    /// assert_eq!(b"hello world", buf.get_ref());
     /// ```
     pub fn get_ref(&self) -> &B {
         &self.buf
     }
 
     /// Gets a mutable reference to the underlying `Buf`.
     ///
     /// It is inadvisable to directly read from the underlying `Buf`.
-    ///
-    /// # Examples
-    ///
-    /// ```rust
-    /// use bytes::Buf;
-    /// use std::io::{self, Cursor};
-    ///
-    /// let mut buf = Cursor::new(b"hello world").reader();
-    /// let mut dst = vec![];
-    ///
-    /// buf.get_mut().set_position(2);
-    /// io::copy(&mut buf, &mut dst).unwrap();
-    ///
-    /// assert_eq!(*dst, b"llo world"[..]);
-    /// ```
     pub fn get_mut(&mut self) -> &mut B {
         &mut self.buf
     }
 
     /// Consumes this `Reader`, returning the underlying value.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::Buf;
-    /// use std::io::{self, Cursor};
+    /// use bytes::{Buf, buf::BufExt};
+    /// use std::io;
     ///
-    /// let mut buf = Cursor::new(b"hello world").reader();
+    /// let mut buf = b"hello world".reader();
     /// let mut dst = vec![];
     ///
     /// io::copy(&mut buf, &mut dst).unwrap();
     ///
     /// let buf = buf.into_inner();
     /// assert_eq!(0, buf.remaining());
     /// ```
     pub fn into_inner(self) -> B {
@@ -81,8 +65,17 @@ impl<B: Buf> Reader<B> {
 impl<B: Buf + Sized> io::Read for Reader<B> {
     fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
         let len = cmp::min(self.buf.remaining(), dst.len());
 
         Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]);
         Ok(len)
     }
 }
+
+impl<B: Buf + Sized> io::BufRead for Reader<B> {
+    fn fill_buf(&mut self) -> io::Result<&[u8]> {
+        Ok(self.buf.bytes())
+    }
+    fn consume(&mut self, amt: usize) {
+        self.buf.advance(amt)
+    }
+}
copy from third_party/rust/bytes/src/buf/take.rs
copy to third_party/rust/bytes/src/buf/ext/take.rs
--- a/third_party/rust/bytes/src/buf/take.rs
+++ b/third_party/rust/bytes/src/buf/ext/take.rs
@@ -1,39 +1,38 @@
-use {Buf};
+use crate::Buf;
 
-use std::cmp;
+use core::cmp;
 
 /// A `Buf` adapter which limits the bytes read from an underlying buffer.
 ///
 /// This struct is generally created by calling `take()` on `Buf`. See
 /// documentation of [`take()`](trait.Buf.html#method.take) for more details.
 #[derive(Debug)]
 pub struct Take<T> {
     inner: T,
     limit: usize,
 }
 
 pub fn new<T>(inner: T, limit: usize) -> Take<T> {
     Take {
-        inner: inner,
-        limit: limit,
+        inner,
+        limit,
     }
 }
 
 impl<T> Take<T> {
     /// Consumes this `Take`, returning the underlying value.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::{Buf, BufMut};
-    /// use std::io::Cursor;
+    /// use bytes::buf::{Buf, BufMut, BufExt};
     ///
-    /// let mut buf = Cursor::new(b"hello world").take(2);
+    /// let mut buf = b"hello world".take(2);
     /// let mut dst = vec![];
     ///
     /// dst.put(&mut buf);
     /// assert_eq!(*dst, b"he"[..]);
     ///
     /// let mut buf = buf.into_inner();
     ///
     /// dst.clear();
@@ -46,41 +45,39 @@ impl<T> Take<T> {
 
     /// Gets a reference to the underlying `Buf`.
     ///
     /// It is inadvisable to directly read from the underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::{Buf, BufMut};
-    /// use std::io::Cursor;
+    /// use bytes::{Buf, buf::BufExt};
     ///
-    /// let mut buf = Cursor::new(b"hello world").take(2);
+    /// let mut buf = b"hello world".take(2);
     ///
-    /// assert_eq!(0, buf.get_ref().position());
+    /// assert_eq!(11, buf.get_ref().remaining());
     /// ```
     pub fn get_ref(&self) -> &T {
         &self.inner
     }
 
     /// Gets a mutable reference to the underlying `Buf`.
     ///
     /// It is inadvisable to directly read from the underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::{Buf, BufMut};
-    /// use std::io::Cursor;
+    /// use bytes::{Buf, BufMut, buf::BufExt};
     ///
-    /// let mut buf = Cursor::new(b"hello world").take(2);
+    /// let mut buf = b"hello world".take(2);
     /// let mut dst = vec![];
     ///
-    /// buf.get_mut().set_position(2);
+    /// buf.get_mut().advance(2);
     ///
     /// dst.put(&mut buf);
     /// assert_eq!(*dst, b"ll"[..]);
     /// ```
     pub fn get_mut(&mut self) -> &mut T {
         &mut self.inner
     }
 
@@ -89,20 +86,19 @@ impl<T> Take<T> {
     /// # Note
     ///
     /// If the inner `Buf` has fewer bytes than indicated by this method then
     /// that is the actual number of available bytes.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::Buf;
-    /// use std::io::Cursor;
+    /// use bytes::{Buf, buf::BufExt};
     ///
-    /// let mut buf = Cursor::new(b"hello world").take(2);
+    /// let mut buf = b"hello world".take(2);
     ///
     /// assert_eq!(2, buf.limit());
     /// assert_eq!(b'h', buf.get_u8());
     /// assert_eq!(1, buf.limit());
     /// ```
     pub fn limit(&self) -> usize {
         self.limit
     }
@@ -112,20 +108,19 @@ impl<T> Take<T> {
     /// # Note
     ///
     /// If the inner `Buf` has fewer bytes than `lim` then that is the actual
     /// number of available bytes.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::{Buf, BufMut};
-    /// use std::io::Cursor;
+    /// use bytes::{Buf, BufMut, buf::BufExt};
     ///
-    /// let mut buf = Cursor::new(b"hello world").take(2);
+    /// let mut buf = b"hello world".take(2);
     /// let mut dst = vec![];
     ///
     /// dst.put(&mut buf);
     /// assert_eq!(*dst, b"he"[..]);
     ///
     /// dst.clear();
     ///
     /// buf.set_limit(3);
copy from third_party/rust/bytes/src/buf/writer.rs
copy to third_party/rust/bytes/src/buf/ext/writer.rs
--- a/third_party/rust/bytes/src/buf/writer.rs
+++ b/third_party/rust/bytes/src/buf/ext/writer.rs
@@ -1,73 +1,73 @@
-use BufMut;
+use crate::BufMut;
 
 use std::{cmp, io};
 
 /// A `BufMut` adapter which implements `io::Write` for the inner value.
 ///
 /// This struct is generally created by calling `writer()` on `BufMut`. See
 /// documentation of [`writer()`](trait.BufMut.html#method.writer) for more
 /// details.
 #[derive(Debug)]
 pub struct Writer<B> {
     buf: B,
 }
 
 pub fn new<B>(buf: B) -> Writer<B> {
-    Writer { buf: buf }
+    Writer { buf }
 }
 
 impl<B: BufMut> Writer<B> {
     /// Gets a reference to the underlying `BufMut`.
     ///
     /// It is inadvisable to directly write to the underlying `BufMut`.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::BufMut;
+    /// use bytes::buf::BufMutExt;
     ///
     /// let mut buf = Vec::with_capacity(1024).writer();
     ///
     /// assert_eq!(1024, buf.get_ref().capacity());
     /// ```
     pub fn get_ref(&self) -> &B {
         &self.buf
     }
 
     /// Gets a mutable reference to the underlying `BufMut`.
     ///
     /// It is inadvisable to directly write to the underlying `BufMut`.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::BufMut;
+    /// use bytes::buf::BufMutExt;
     ///
     /// let mut buf = vec![].writer();
     ///
     /// buf.get_mut().reserve(1024);
     ///
     /// assert_eq!(1024, buf.get_ref().capacity());
     /// ```
     pub fn get_mut(&mut self) -> &mut B {
         &mut self.buf
     }
 
     /// Consumes this `Writer`, returning the underlying value.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::BufMut;
-    /// use std::io::{self, Cursor};
+    /// use bytes::buf::BufMutExt;
+    /// use std::io;
     ///
     /// let mut buf = vec![].writer();
-    /// let mut src = Cursor::new(b"hello world");
+    /// let mut src = &b"hello world"[..];
     ///
     /// io::copy(&mut src, &mut buf).unwrap();
     ///
     /// let buf = buf.into_inner();
     /// assert_eq!(*buf, b"hello world"[..]);
     /// ```
     pub fn into_inner(self) -> B {
         self.buf
--- a/third_party/rust/bytes/src/buf/iter.rs
+++ b/third_party/rust/bytes/src/buf/iter.rs
@@ -1,47 +1,66 @@
-use Buf;
+use crate::Buf;
 
 /// Iterator over the bytes contained by the buffer.
 ///
 /// This struct is created by the [`iter`] method on [`Buf`].
 ///
 /// # Examples
 ///
 /// Basic usage:
 ///
 /// ```
-/// use bytes::{Buf, IntoBuf, Bytes};
+/// use bytes::{Buf, Bytes};
 ///
-/// let buf = Bytes::from(&b"abc"[..]).into_buf();
-/// let mut iter = buf.iter();
+/// let buf = Bytes::from(&b"abc"[..]);
+/// let mut iter = buf.into_iter();
 ///
 /// assert_eq!(iter.next(), Some(b'a'));
 /// assert_eq!(iter.next(), Some(b'b'));
 /// assert_eq!(iter.next(), Some(b'c'));
 /// assert_eq!(iter.next(), None);
 /// ```
 ///
 /// [`iter`]: trait.Buf.html#method.iter
 /// [`Buf`]: trait.Buf.html
 #[derive(Debug)]
-pub struct Iter<T> {
+pub struct IntoIter<T> {
     inner: T,
 }
 
-impl<T> Iter<T> {
-    /// Consumes this `Iter`, returning the underlying value.
+impl<T> IntoIter<T> {
+    /// Creates an iterator over the bytes contained by the buffer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{Buf, Bytes};
+    /// use bytes::buf::IntoIter;
+    ///
+    /// let buf = Bytes::from_static(b"abc");
+    /// let mut iter = IntoIter::new(buf);
+    ///
+    /// assert_eq!(iter.next(), Some(b'a'));
+    /// assert_eq!(iter.next(), Some(b'b'));
+    /// assert_eq!(iter.next(), Some(b'c'));
+    /// assert_eq!(iter.next(), None);
+    /// ```
+    pub fn new(inner: T) -> IntoIter<T> {
+        IntoIter { inner }
+    }
+    /// Consumes this `IntoIter`, returning the underlying value.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::{Buf, IntoBuf, Bytes};
+    /// use bytes::{Buf, Bytes};
     ///
-    /// let buf = Bytes::from(&b"abc"[..]).into_buf();
-    /// let mut iter = buf.iter();
+    /// let buf = Bytes::from(&b"abc"[..]);
+    /// let mut iter = buf.into_iter();
     ///
     /// assert_eq!(iter.next(), Some(b'a'));
     ///
     /// let buf = iter.into_inner();
     /// assert_eq!(2, buf.remaining());
     /// ```
     pub fn into_inner(self) -> T {
         self.inner
@@ -49,68 +68,66 @@ impl<T> Iter<T> {
 
     /// Gets a reference to the underlying `Buf`.
     ///
     /// It is inadvisable to directly read from the underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::{Buf, IntoBuf, Bytes};
+    /// use bytes::{Buf, Bytes};
     ///
-    /// let buf = Bytes::from(&b"abc"[..]).into_buf();
-    /// let mut iter = buf.iter();
+    /// let buf = Bytes::from(&b"abc"[..]);
+    /// let mut iter = buf.into_iter();
     ///
     /// assert_eq!(iter.next(), Some(b'a'));
     ///
     /// assert_eq!(2, iter.get_ref().remaining());
     /// ```
     pub fn get_ref(&self) -> &T {
         &self.inner
     }
 
     /// Gets a mutable reference to the underlying `Buf`.
     ///
     /// It is inadvisable to directly read from the underlying `Buf`.
     ///
     /// # Examples
     ///
     /// ```rust
-    /// use bytes::{Buf, IntoBuf, BytesMut};
+    /// use bytes::{Buf, BytesMut};
     ///
-    /// let buf = BytesMut::from(&b"abc"[..]).into_buf();
-    /// let mut iter = buf.iter();
+    /// let buf = BytesMut::from(&b"abc"[..]);
+    /// let mut iter = buf.into_iter();
     ///
     /// assert_eq!(iter.next(), Some(b'a'));
     ///
-    /// iter.get_mut().set_position(0);
+    /// iter.get_mut().advance(1);
     ///
-    /// assert_eq!(iter.next(), Some(b'a'));
+    /// assert_eq!(iter.next(), Some(b'c'));
     /// ```
     pub fn get_mut(&mut self) -> &mut T {
         &mut self.inner
     }
 }
 
-pub fn new<T>(inner: T) -> Iter<T> {
-    Iter { inner: inner }
-}
 
-impl<T: Buf> Iterator for Iter<T> {
+impl<T: Buf> Iterator for IntoIter<T> {
     type Item = u8;
 
     fn next(&mut self) -> Option<u8> {
         if !self.inner.has_remaining() {
             return None;
         }
 
         let b = self.inner.bytes()[0];
         self.inner.advance(1);
+
         Some(b)
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
         let rem = self.inner.remaining();
         (rem, Some(rem))
     }
 }
 
-impl<T: Buf> ExactSizeIterator for Iter<T> { }
+impl<T: Buf> ExactSizeIterator for IntoIter<T> { }
--- a/third_party/rust/bytes/src/buf/mod.rs
+++ b/third_party/rust/bytes/src/buf/mod.rs
@@ -11,27 +11,21 @@
 //! performance over `Iterator` by providing an API optimized for byte slices.
 //!
 //! See [`Buf`] and [`BufMut`] for more details.
 //!
 //! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
 //! [`Buf`]: trait.Buf.html
 //! [`BufMut`]: trait.BufMut.html
 
-mod buf;
+mod buf_impl;
 mod buf_mut;
-mod from_buf;
-mod chain;
-mod into_buf;
+pub mod ext;
 mod iter;
-mod reader;
-mod take;
-mod writer;
+mod vec_deque;
 
-pub use self::buf::Buf;
+pub use self::buf_impl::Buf;
 pub use self::buf_mut::BufMut;
-pub use self::from_buf::FromBuf;
-pub use self::chain::Chain;
-pub use self::into_buf::IntoBuf;
-pub use self::iter::Iter;
-pub use self::reader::Reader;
-pub use self::take::Take;
-pub use self::writer::Writer;
+pub use self::ext::{BufExt, BufMutExt};
+#[cfg(feature = "std")]
+pub use self::buf_mut::IoSliceMut;
+pub use self::iter::IntoIter;
+
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/src/buf/vec_deque.rs
@@ -0,0 +1,22 @@
+use alloc::collections::VecDeque;
+
+use super::Buf;
+
+impl Buf for VecDeque<u8> {
+    fn remaining(&self) -> usize {
+        self.len()
+    }
+
+    fn bytes(&self) -> &[u8] {
+        let (s1, s2) = self.as_slices();
+        if s1.is_empty() {
+            s2
+        } else {
+            s1
+        }
+    }
+
+    fn advance(&mut self, cnt: usize) {
+        self.drain(..cnt);
+    }
+}
--- a/third_party/rust/bytes/src/bytes.rs
+++ b/third_party/rust/bytes/src/bytes.rs
@@ -1,63 +1,49 @@
-use {IntoBuf, Buf, BufMut};
-use buf::Iter;
-use debug;
+use core::{cmp, fmt, hash, mem, ptr, slice, usize};
+use core::iter::{FromIterator};
+use core::ops::{Deref, RangeBounds};
 
-use std::{cmp, fmt, mem, hash, ops, slice, ptr, usize};
-use std::borrow::{Borrow, BorrowMut};
-use std::io::Cursor;
-use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
-use std::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel};
-use std::iter::{FromIterator, Iterator};
+use alloc::{vec::Vec, string::String, boxed::Box, borrow::Borrow};
+
+use crate::Buf;
+use crate::buf::IntoIter;
+use crate::debug;
+use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
 
 /// A reference counted contiguous slice of memory.
 ///
 /// `Bytes` is an efficient container for storing and operating on contiguous
 /// slices of memory. It is intended for use primarily in networking code, but
 /// could have applications elsewhere as well.
 ///
 /// `Bytes` values facilitate zero-copy network programming by allowing multiple
 /// `Bytes` objects to point to the same underlying memory. This is managed by
 /// using a reference count to track when the memory is no longer needed and can
 /// be freed.
 ///
 /// ```
 /// use bytes::Bytes;
 ///
-/// let mut mem = Bytes::from(&b"Hello world"[..]);
-/// let a = mem.slice(0, 5);
+/// let mut mem = Bytes::from("Hello world");
+/// let a = mem.slice(0..5);
 ///
-/// assert_eq!(&a[..], b"Hello");
+/// assert_eq!(a, "Hello");
 ///
 /// let b = mem.split_to(6);
 ///
-/// assert_eq!(&mem[..], b"world");
-/// assert_eq!(&b[..], b"Hello ");
+/// assert_eq!(mem, "world");
+/// assert_eq!(b, "Hello ");
 /// ```
 ///
 /// # Memory layout
 ///
-/// The `Bytes` struct itself is fairly small, limited to a pointer to the
-/// memory and 4 `usize` fields used to track information about which segment of
-/// the underlying memory the `Bytes` handle has access to.
-///
-/// The memory layout looks like this:
-///
-/// ```text
-/// +-------+
-/// | Bytes |
-/// +-------+
-///  /      \_____
-/// |              \
-/// v               v
-/// +-----+------------------------------------+
-/// | Arc |         |      Data     |          |
-/// +-----+------------------------------------+
-/// ```
+/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
+/// to track information about which segment of the underlying memory the
+/// `Bytes` handle has access to.
 ///
 /// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory
 /// slice and a pointer to the start of the region visible by the handle.
 /// `Bytes` also tracks the length of its view into the memory.
 ///
 /// # Sharing
 ///
 /// The memory itself is reference counted, and multiple `Bytes` objects may
@@ -76,509 +62,224 @@ use std::iter::{FromIterator, Iterator};
 /// |           +-----------+     |         |
 /// |           |           | ___/ data     | tail
 /// |      data |      tail |/              |
 /// v           v           v               v
 /// +-----+---------------------------------+-----+
 /// | Arc |     |           |               |     |
 /// +-----+---------------------------------+-----+
 /// ```
-///
-/// # Mutating
-///
-/// While `Bytes` handles may potentially represent overlapping views of the
-/// underlying memory slice and may not be mutated, `BytesMut` handles are
-/// guaranteed to be the only handle able to view that slice of memory. As such,
-/// `BytesMut` handles are able to mutate the underlying memory. Note that
-/// holding a unique view to a region of memory does not mean that there are no
-/// other `Bytes` and `BytesMut` handles with disjoint views of the underlying
-/// memory.
-///
-/// # Inline bytes
-///
-/// As an optimization, when the slice referenced by a `Bytes` or `BytesMut`
-/// handle is small enough [^1], `with_capacity` will avoid the allocation by
-/// inlining the slice directly in the handle. In this case, a clone is no
-/// longer "shallow" and the data will be copied.  Converting from a `Vec` will
-/// never use inlining.
-///
-/// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems.
-///
 pub struct Bytes {
-    inner: Inner,
-}
-
-/// A unique reference to a contiguous slice of memory.
-///
-/// `BytesMut` represents a unique view into a potentially shared memory region.
-/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
-/// mutate the memory. It is similar to a `Vec<u8>` but with less copies and
-/// allocations.
-///
-/// For more detail, see [Bytes](struct.Bytes.html).
-///
-/// # Growth
-///
-/// One key difference from `Vec<u8>` is that most operations **do not
-/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
-/// world");` could panic if `my_bytes` does not have enough capacity. Before
-/// writing to the buffer, ensure that there is enough remaining capacity by
-/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
-/// is preferable.
-///
-/// The only exception is `extend` which implicitly reserves required capacity.
-///
-/// # Examples
-///
-/// ```
-/// use bytes::{BytesMut, BufMut};
-///
-/// let mut buf = BytesMut::with_capacity(64);
-///
-/// buf.put(b'h');
-/// buf.put(b'e');
-/// buf.put("llo");
-///
-/// assert_eq!(&buf[..], b"hello");
-///
-/// // Freeze the buffer so that it can be shared
-/// let a = buf.freeze();
-///
-/// // This does not allocate, instead `b` points to the same memory.
-/// let b = a.clone();
-///
-/// assert_eq!(&a[..], b"hello");
-/// assert_eq!(&b[..], b"hello");
-/// ```
-pub struct BytesMut {
-    inner: Inner,
+    ptr: *const u8,
+    len: usize,
+    // inlined "trait object"
+    data: AtomicPtr<()>,
+    vtable: &'static Vtable,
 }
 
-// Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated
-// to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions
-// that mutate the underlying buffer are only performed when the data range
-// being mutated is only available via a single `BytesMut` handle.
-//
-// # Data storage modes
-//
-// The goal of `bytes` is to be as efficient as possible across a wide range of
-// potential usage patterns. As such, `bytes` needs to be able to handle buffers
-// that are never shared, shared on a single thread, and shared across many
-// threads. `bytes` also needs to handle both tiny buffers as well as very large
-// buffers. For example, [Cassandra](http://cassandra.apache.org) values have
-// been known to be in the hundreds of megabyte, and HTTP header values can be a
-// few characters in size.
-//
-// To achieve high performance in these various situations, `Bytes` and
-// `BytesMut` use different strategies for storing the buffer depending on the
-// usage pattern.
-//
-// ## Delayed `Arc` allocation
-//
-// When a `Bytes` or `BytesMut` is first created, there is only one outstanding
-// handle referencing the buffer. Since sharing is not yet required, an `Arc`* is
-// not used and the buffer is backed by a `Vec<u8>` directly. Using an
-// `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being
-// shared, that allocation is avoided.
-//
-// When sharing does become necessary (`clone`, `split_to`, `split_off`), that
-// is when the buffer is promoted to being shareable. The `Vec<u8>` is moved
-// into an `Arc` and both the original handle and the new handle use the same
-// buffer via the `Arc`.
-//
-// * `Arc` is being used to signify an atomically reference counted cell. We
-// don't use the `Arc` implementation provided by `std` and instead use our own.
-// This ends up simplifying a number of the `unsafe` code snippets.
-//
-// ## Inlining small buffers
-//
-// The `Bytes` / `BytesMut` structs require 4 pointer sized fields. On 64 bit
-// systems, this ends up being 32 bytes, which is actually a lot of storage for
-// cases where `Bytes` is being used to represent small byte strings, such as
-// HTTP header names and values.
-//
-// To avoid any allocation at all in these cases, `Bytes` will use the struct
-// itself for storing the buffer, reserving 1 byte for meta data. This means
-// that, on 64 bit systems, 31 byte buffers require no allocation at all.
-//
-// The byte used for metadata stores a 2 bits flag used to indicate that the
-// buffer is stored inline as well as 6 bits for tracking the buffer length (the
-// return value of `Bytes::len`).
-//
-// ## Static buffers
-//
-// `Bytes` can also represent a static buffer, which is created with
-// `Bytes::from_static`. No copying or allocations are required for tracking
-// static buffers. The pointer to the `&'static [u8]`, the length, and a flag
-// tracking that the `Bytes` instance represents a static buffer is stored in
-// the `Bytes` struct.
-//
-// # Struct layout
-//
-// Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the
-// data fields as well as all of the function implementations.
-//
-// The `Inner` struct is carefully laid out in order to support the
-// functionality described above as well as being as small as possible. Size is
-// important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes
-// added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header
-// map structure.
-//
-// The `Inner` struct contains the following fields:
-//
-// * `ptr: *mut u8`
-// * `len: usize`
-// * `cap: usize`
-// * `arc: AtomicPtr<Shared>`
-//
-// ## `ptr: *mut u8`
-//
-// A pointer to start of the handle's buffer view. When backed by a `Vec<u8>`,
-// this is always the `Vec`'s pointer. When backed by an `Arc<Vec<u8>>`, `ptr`
-// may have been shifted to point somewhere inside the buffer.
-//
-// When in "inlined" mode, `ptr` is used as part of the inlined buffer.
-//
-// ## `len: usize`
-//
-// The length of the handle's buffer view. When backed by a `Vec<u8>`, this is
-// always the `Vec`'s length. The slice represented by `ptr` and `len` should
-// (ideally) always be initialized memory.
-//
-// When in "inlined" mode, `len` is used as part of the inlined buffer.
-//
-// ## `cap: usize`
-//
-// The capacity of the handle's buffer view. When backed by a `Vec<u8>`, this is
-// always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len`
-// may or may not be initialized memory.
-//
-// When in "inlined" mode, `cap` is used as part of the inlined buffer.
-//
-// ## `arc: AtomicPtr<Shared>`
-//
-// When `Inner` is in allocated mode (backed by Vec<u8> or Arc<Vec<u8>>), this
-// will be the pointer to the `Arc` structure tracking the ref count for the
-// underlying buffer. When the pointer is null, then the `Arc` has not been
-// allocated yet and `self` is the only outstanding handle for the underlying
-// buffer.
-//
-// The lower two bits of `arc` are used to track the storage mode of `Inner`.
-// `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11`
-// indicates vector storage, not yet promoted to Arc.  Since pointers to
-// allocated structures are aligned, the lower two bits of a pointer will always
-// be 0. This allows disambiguating between a pointer and the two flags.
-//
-// When in "inlined" mode, the least significant byte of `arc` is also used to
-// store the length of the buffer view (vs. the capacity, which is a constant).
-//
-// The rest of `arc`'s bytes are used as part of the inline buffer, which means
-// that those bytes need to be located next to the `ptr`, `len`, and `cap`
-// fields, which make up the rest of the inline buffer. This requires special
-// casing the layout of `Inner` depending on if the target platform is bit or
-// little endian.
-//
-// On little endian platforms, the `arc` field must be the first field in the
-// struct. On big endian platforms, the `arc` field must be the last field in
-// the struct. Since a deterministic struct layout is required, `Inner` is
-// annotated with `#[repr(C)]`.
-//
-// # Thread safety
-//
-// `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done
-// by bumping the buffer ref count and returning a new struct pointing to the
-// same buffer. However, the `Arc` structure is lazily allocated. This means
-// that if `Bytes` is stored itself in an `Arc` (`Arc<Bytes>`), the `clone`
-// function can be called concurrently from multiple threads. This is why an
-// `AtomicPtr` is used for the `arc` field vs. a `*const`.
-//
-// Care is taken to ensure that the need for synchronization is minimized. Most
-// operations do not require any synchronization.
-//
-#[cfg(target_endian = "little")]
-#[repr(C)]
-struct Inner {
-    // WARNING: Do not access the fields directly unless you know what you are
-    // doing. Instead, use the fns. See implementation comment above.
-    arc: AtomicPtr<Shared>,
-    ptr: *mut u8,
-    len: usize,
-    cap: usize,
+pub(crate) struct Vtable {
+    /// fn(data, ptr, len)
+    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
+    /// fn(data, ptr, len)
+    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
 }
 
-#[cfg(target_endian = "big")]
-#[repr(C)]
-struct Inner {
-    // WARNING: Do not access the fields directly unless you know what you are
-    // doing. Instead, use the fns. See implementation comment above.
-    ptr: *mut u8,
-    len: usize,
-    cap: usize,
-    arc: AtomicPtr<Shared>,
-}
-
-// Thread-safe reference-counted container for the shared storage. This mostly
-// the same as `std::sync::Arc` but without the weak counter. The ref counting
-// fns are based on the ones found in `std`.
-//
-// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
-// up making the overall code simpler and easier to reason about. This is due to
-// some of the logic around setting `Inner::arc` and other ways the `arc` field
-// is used. Using `Arc` ended up requiring a number of funky transmutes and
-// other shenanigans to make it work.
-struct Shared {
-    vec: Vec<u8>,
-    original_capacity_repr: usize,
-    ref_count: AtomicUsize,
-}
-
-// Buffer storage strategy flags.
-const KIND_ARC: usize = 0b00;
-const KIND_INLINE: usize = 0b01;
-const KIND_STATIC: usize = 0b10;
-const KIND_VEC: usize = 0b11;
-const KIND_MASK: usize = 0b11;
-
-// The max original capacity value. Any `Bytes` allocated with a greater initial
-// capacity will default to this.
-const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
-// The original capacity algorithm will not take effect unless the originally
-// allocated capacity was at least 1kb in size.
-const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
-// The original capacity is stored in powers of 2 starting at 1kb to a max of
-// 64kb. Representing it as such requires only 3 bits of storage.
-const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
-const ORIGINAL_CAPACITY_OFFSET: usize = 2;
-
-// When the storage is in the `Vec` representation, the pointer can be advanced
-// at most this value. This is due to the amount of storage available to track
-// the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY
-// bits.
-const VEC_POS_OFFSET: usize = 5;
-const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
-const NOT_VEC_POS_MASK: usize = 0b11111;
-
-// Bit op constants for extracting the inline length value from the `arc` field.
-const INLINE_LEN_MASK: usize = 0b11111100;
-const INLINE_LEN_OFFSET: usize = 2;
-
-// Byte offset from the start of `Inner` to where the inline buffer data
-// starts. On little endian platforms, the first byte of the struct is the
-// storage flag, so the data is shifted by a byte. On big endian systems, the
-// data starts at the beginning of the struct.
-#[cfg(target_endian = "little")]
-const INLINE_DATA_OFFSET: isize = 1;
-#[cfg(target_endian = "big")]
-const INLINE_DATA_OFFSET: isize = 0;
-
-#[cfg(target_pointer_width = "64")]
-const PTR_WIDTH: usize = 64;
-#[cfg(target_pointer_width = "32")]
-const PTR_WIDTH: usize = 32;
-
-// Inline buffer capacity. This is the size of `Inner` minus 1 byte for the
-// metadata.
-#[cfg(target_pointer_width = "64")]
-const INLINE_CAP: usize = 4 * 8 - 1;
-#[cfg(target_pointer_width = "32")]
-const INLINE_CAP: usize = 4 * 4 - 1;
-
-/*
- *
- * ===== Bytes =====
- *
- */
-
 impl Bytes {
-    /// Creates a new `Bytes` with the specified capacity.
-    ///
-    /// The returned `Bytes` will be able to hold at least `capacity` bytes
-    /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
-    /// then `BytesMut` will not allocate.
-    ///
-    /// It is important to note that this function does not specify the length
-    /// of the returned `Bytes`, but only the capacity.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::Bytes;
-    ///
-    /// let mut bytes = Bytes::with_capacity(64);
-    ///
-    /// // `bytes` contains no data, even though there is capacity
-    /// assert_eq!(bytes.len(), 0);
-    ///
-    /// bytes.extend_from_slice(&b"hello world"[..]);
-    ///
-    /// assert_eq!(&bytes[..], b"hello world");
-    /// ```
-    #[inline]
-    pub fn with_capacity(capacity: usize) -> Bytes {
-        Bytes {
-            inner: Inner::with_capacity(capacity),
-        }
-    }
-
     /// Creates a new empty `Bytes`.
     ///
     /// This will not allocate and the returned `Bytes` handle will be empty.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Bytes;
     ///
     /// let b = Bytes::new();
     /// assert_eq!(&b[..], b"");
     /// ```
     #[inline]
     pub fn new() -> Bytes {
-        Bytes::with_capacity(0)
+        Bytes::from_static(b"")
     }
 
     /// Creates a new `Bytes` from a static slice.
     ///
     /// The returned `Bytes` will point directly to the static slice. There is
     /// no allocating or copying.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Bytes;
     ///
     /// let b = Bytes::from_static(b"hello");
     /// assert_eq!(&b[..], b"hello");
     /// ```
     #[inline]
+    #[cfg(not(all(loom, test)))]
+    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
+        Bytes {
+            ptr: bytes.as_ptr(),
+            len: bytes.len(),
+            data: AtomicPtr::new(ptr::null_mut()),
+            vtable: &STATIC_VTABLE,
+        }
+    }
+
+    #[cfg(all(loom, test))]
     pub fn from_static(bytes: &'static [u8]) -> Bytes {
         Bytes {
-            inner: Inner::from_static(bytes),
+            ptr: bytes.as_ptr(),
+            len: bytes.len(),
+            data: AtomicPtr::new(ptr::null_mut()),
+            vtable: &STATIC_VTABLE,
         }
     }
 
     /// Returns the number of bytes contained in this `Bytes`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Bytes;
     ///
     /// let b = Bytes::from(&b"hello"[..]);
     /// assert_eq!(b.len(), 5);
     /// ```
     #[inline]
     pub fn len(&self) -> usize {
-        self.inner.len()
+        self.len
     }
 
     /// Returns true if the `Bytes` has a length of 0.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Bytes;
     ///
     /// let b = Bytes::new();
     /// assert!(b.is_empty());
     /// ```
     #[inline]
     pub fn is_empty(&self) -> bool {
-        self.inner.is_empty()
+        self.len == 0
     }
 
-    /// Returns a slice of self for the index range `[begin..end)`.
+
+    ///Creates `Bytes` instance from slice, by copying it.
+    pub fn copy_from_slice(data: &[u8]) -> Self {
+        data.to_vec().into()
+    }
+
+    /// Returns a slice of self for the provided range.
     ///
     /// This will increment the reference count for the underlying memory and
     /// return a new `Bytes` handle set to the slice.
     ///
     /// This operation is `O(1)`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Bytes;
     ///
     /// let a = Bytes::from(&b"hello world"[..]);
-    /// let b = a.slice(2, 5);
+    /// let b = a.slice(2..5);
     ///
     /// assert_eq!(&b[..], b"llo");
     /// ```
     ///
     /// # Panics
     ///
     /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
     /// will panic.
-    pub fn slice(&self, begin: usize, end: usize) -> Bytes {
-        assert!(begin <= end);
-        assert!(end <= self.len());
+    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
+        use core::ops::Bound;
+
+        let len = self.len();
+
+        let begin = match range.start_bound() {
+            Bound::Included(&n) => n,
+            Bound::Excluded(&n) => n + 1,
+            Bound::Unbounded => 0,
+        };
 
-        if end - begin <= INLINE_CAP {
-            return Bytes::from(&self[begin..end]);
+        let end = match range.end_bound() {
+            Bound::Included(&n) => n + 1,
+            Bound::Excluded(&n) => n,
+            Bound::Unbounded => len,
+        };
+
+        assert!(begin <= end);
+        assert!(end <= len);
+
+        if end == begin {
+            return Bytes::new();
         }
 
+
         let mut ret = self.clone();
 
-        unsafe {
-            ret.inner.set_end(end);
-            ret.inner.set_start(begin);
-        }
+        ret.len = end - begin;
+        ret.ptr = unsafe { ret.ptr.offset(begin as isize) };
 
         ret
     }
 
-    /// Returns a slice of self for the index range `[begin..self.len())`.
+    /// Returns a slice of self that is equivalent to the given `subset`.
     ///
-    /// This will increment the reference count for the underlying memory and
-    /// return a new `Bytes` handle set to the slice.
+    /// When processing a `Bytes` buffer with other tools, one often gets a
+    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
+    /// This function turns that `&[u8]` into another `Bytes`, as if one had
+    /// called `self.slice()` with the offsets that correspond to `subset`.
     ///
-    /// This operation is `O(1)` and is equivalent to `self.slice(begin,
-    /// self.len())`.
+    /// This operation is `O(1)`.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Bytes;
     ///
-    /// let a = Bytes::from(&b"hello world"[..]);
-    /// let b = a.slice_from(6);
-    ///
-    /// assert_eq!(&b[..], b"world");
+    /// let bytes = Bytes::from(&b"012345678"[..]);
+    /// let as_slice = bytes.as_ref();
+    /// let subset = &as_slice[2..6];
+    /// let subslice = bytes.slice_ref(&subset);
+    /// assert_eq!(&subslice[..], b"2345");
     /// ```
     ///
     /// # Panics
     ///
-    /// Requires that `begin <= self.len()`, otherwise slicing will panic.
-    pub fn slice_from(&self, begin: usize) -> Bytes {
-        self.slice(begin, self.len())
-    }
+    /// Requires that the given `sub` slice is in fact contained within the
+    /// `Bytes` buffer; otherwise this function will panic.
+    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
+        let bytes_p = self.as_ptr() as usize;
+        let bytes_len = self.len();
+
+        let sub_p = subset.as_ptr() as usize;
+        let sub_len = subset.len();
 
-    /// Returns a slice of self for the index range `[0..end)`.
-    ///
-    /// This will increment the reference count for the underlying memory and
-    /// return a new `Bytes` handle set to the slice.
-    ///
-    /// This operation is `O(1)` and is equivalent to `self.slice(0, end)`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::Bytes;
-    ///
-    /// let a = Bytes::from(&b"hello world"[..]);
-    /// let b = a.slice_to(5);
-    ///
-    /// assert_eq!(&b[..], b"hello");
-    /// ```
-    ///
-    /// # Panics
-    ///
-    /// Requires that `end <= self.len()`, otherwise slicing will panic.
-    pub fn slice_to(&self, end: usize) -> Bytes {
-        self.slice(0, end)
+        assert!(
+            sub_p >= bytes_p,
+            "subset pointer ({:p}) is smaller than self pointer ({:p})",
+            sub_p as *const u8,
+            bytes_p as *const u8,
+        );
+        assert!(
+            sub_p + sub_len <= bytes_p + bytes_len,
+            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
+            bytes_p as *const u8,
+            bytes_len,
+            sub_p as *const u8,
+            sub_len,
+        );
+
+        let sub_offset = sub_p - bytes_p;
+
+        self.slice(sub_offset..(sub_offset + sub_len))
     }
 
     /// Splits the bytes into two at the given index.
     ///
     /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
     /// contains elements `[at, len)`.
     ///
     /// This is an `O(1)` operation that just increases the reference count and
@@ -594,30 +295,35 @@ impl Bytes {
     ///
     /// assert_eq!(&a[..], b"hello");
     /// assert_eq!(&b[..], b" world");
     /// ```
     ///
     /// # Panics
     ///
     /// Panics if `at > len`.
+    #[must_use = "consider Bytes::truncate if you don't need the other half"]
     pub fn split_off(&mut self, at: usize) -> Bytes {
         assert!(at <= self.len());
 
         if at == self.len() {
             return Bytes::new();
         }
 
         if at == 0 {
             return mem::replace(self, Bytes::new());
         }
 
-        Bytes {
-            inner: self.inner.split_off(at),
-        }
+        let mut ret = self.clone();
+
+        self.len = at;
+
+        unsafe { ret.inc_start(at) };
+
+        ret
     }
 
     /// Splits the bytes into two at the given index.
     ///
     /// Afterwards `self` contains elements `[at, len)`, and the returned
     /// `Bytes` contains elements `[0, at)`.
     ///
     /// This is an `O(1)` operation that just increases the reference count and
@@ -633,36 +339,35 @@ impl Bytes {
     ///
     /// assert_eq!(&a[..], b" world");
     /// assert_eq!(&b[..], b"hello");
     /// ```
     ///
     /// # Panics
     ///
     /// Panics if `at > len`.
+    #[must_use = "consider Bytes::advance if you don't need the other half"]
     pub fn split_to(&mut self, at: usize) -> Bytes {
         assert!(at <= self.len());
 
         if at == self.len() {
             return mem::replace(self, Bytes::new());
         }
 
         if at == 0 {
             return Bytes::new();
         }
 
-        Bytes {
-            inner: self.inner.split_to(at),
-        }
-    }
+
+        let mut ret = self.clone();
 
-    #[deprecated(since = "0.4.1", note = "use split_to instead")]
-    #[doc(hidden)]
-    pub fn drain_to(&mut self, at: usize) -> Bytes {
-        self.split_to(at)
+        unsafe { self.inc_start(at) };
+
+        ret.len = at;
+        ret
     }
 
     /// Shortens the buffer, keeping the first `len` bytes and dropping the
     /// rest.
     ///
     /// If `len` is greater than the buffer's current length, this has no
     /// effect.
     ///
@@ -675,2051 +380,202 @@ impl Bytes {
     /// use bytes::Bytes;
     ///
     /// let mut buf = Bytes::from(&b"hello world"[..]);
     /// buf.truncate(5);
     /// assert_eq!(buf, b"hello"[..]);
     /// ```
     ///
     /// [`split_off`]: #method.split_off
+    #[inline]
     pub fn truncate(&mut self, len: usize) {
-        self.inner.truncate(len);
-    }
-
-    /// Shortens the buffer, dropping the first `cnt` bytes and keeping the
-    /// rest.
-    ///
-    /// This is the same function as `Buf::advance`, and in the next breaking
-    /// release of `bytes`, this implementation will be removed in favor of
-    /// having `Bytes` implement `Buf`.
-    ///
-    /// # Panics
-    ///
-    /// This function panics if `cnt` is greater than `self.len()`
-    #[inline]
-    pub fn advance(&mut self, cnt: usize) {
-        assert!(cnt <= self.len(), "cannot advance past `remaining`");
-        unsafe { self.inner.set_start(cnt); }
+        if len < self.len {
+            self.len = len;
+        }
     }
 
     /// Clears the buffer, removing all data.
     ///
     /// # Examples
     ///
     /// ```
     /// use bytes::Bytes;
     ///
     /// let mut buf = Bytes::from(&b"hello world"[..]);
     /// buf.clear();
     /// assert!(buf.is_empty());
     /// ```
+    #[inline]
     pub fn clear(&mut self) {
         self.truncate(0);
     }
 
-    /// Attempts to convert into a `BytesMut` handle.
-    ///
-    /// This will only succeed if there are no other outstanding references to
-    /// the underlying chunk of memory. `Bytes` handles that contain inlined
-    /// bytes will always be convertable to `BytesMut`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::Bytes;
-    ///
-    /// let a = Bytes::from(&b"Mary had a little lamb, little lamb, little lamb..."[..]);
-    ///
-    /// // Create a shallow clone
-    /// let b = a.clone();
-    ///
-    /// // This will fail because `b` shares a reference with `a`
-    /// let a = a.try_mut().unwrap_err();
-    ///
-    /// drop(b);
-    ///
-    /// // This will succeed
-    /// let mut a = a.try_mut().unwrap();
-    ///
-    /// a[0] = b'b';
-    ///
-    /// assert_eq!(&a[..4], b"bary");
-    /// ```
-    pub fn try_mut(mut self) -> Result<BytesMut, Bytes> {
-        if self.inner.is_mut_safe() {
-            Ok(BytesMut { inner: self.inner })
-        } else {
-            Err(self)
+    #[inline]
+    pub(crate) unsafe fn with_vtable(ptr: *const u8, len: usize, data: AtomicPtr<()>, vtable: &'static Vtable) -> Bytes {
+        Bytes {
+            ptr,
+            len,
+            data,
+            vtable,
+        }
+    }
+
+    // private
+
+    #[inline]
+    fn as_slice(&self) -> &[u8] {
+        unsafe {
+            slice::from_raw_parts(self.ptr, self.len)
         }
     }
 
-    /// Appends given bytes to this object.
-    ///
-    /// If this `Bytes` object has not enough capacity, it is resized first.
-    /// If it is shared (`refcount > 1`), it is copied first.
-    ///
-    /// This operation can be less effective than the similar operation on
-    /// `BytesMut`, especially on small additions.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::Bytes;
-    ///
-    /// let mut buf = Bytes::from("aabb");
-    /// buf.extend_from_slice(b"ccdd");
-    /// buf.extend_from_slice(b"eeff");
-    ///
-    /// assert_eq!(b"aabbccddeeff", &buf[..]);
-    /// ```
-    pub fn extend_from_slice(&mut self, extend: &[u8]) {
-        if extend.is_empty() {
-            return;
-        }
-
-        let new_cap = self.len().checked_add(extend.len()).expect("capacity overflow");
-
-        let result = match mem::replace(self, Bytes::new()).try_mut() {
-            Ok(mut bytes_mut) => {
-                bytes_mut.extend_from_slice(extend);
-                bytes_mut
-            },
-            Err(bytes) => {
-                let mut bytes_mut = BytesMut::with_capacity(new_cap);
-                bytes_mut.put_slice(&bytes);
-                bytes_mut.put_slice(extend);
-                bytes_mut
-            }
-        };
-
-        mem::replace(self, result.freeze());
+    #[inline]
+    unsafe fn inc_start(&mut self, by: usize) {
+        // should already be asserted, but debug assert for tests
+        debug_assert!(self.len >= by);
+        self.len -= by;
+        self.ptr = self.ptr.offset(by as isize);
     }
 }
 
-impl IntoBuf for Bytes {
-    type Buf = Cursor<Self>;
+// Vtable must enforce this behavior
+unsafe impl Send for Bytes {}
+unsafe impl Sync for Bytes {}
 
-    fn into_buf(self) -> Self::Buf {
-        Cursor::new(self)
-    }
-}
-
-impl<'a> IntoBuf for &'a Bytes {
-    type Buf = Cursor<Self>;
-
-    fn into_buf(self) -> Self::Buf {
-        Cursor::new(self)
+impl Drop for Bytes {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe {
+            (self.vtable.drop)(&mut self.data, self.ptr, self.len)
+        }
     }
 }
 
 impl Clone for Bytes {
+    #[inline]
     fn clone(&self) -> Bytes {
-        Bytes {
-            inner: unsafe { self.inner.shallow_clone(false) },
+        unsafe {
+            (self.vtable.clone)(&self.data, self.ptr, self.len)
         }
     }
 }
 
+impl fmt::Debug for Bytes {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&debug::BsDebug(&self.as_slice()), f)
+    }
+}
+
+impl Buf for Bytes {
+    #[inline]
+    fn remaining(&self) -> usize {
+        self.len()
+    }
+
+    #[inline]
+    fn bytes(&self) -> &[u8] {
+        self.as_slice()
+    }
+
+    #[inline]
+    fn advance(&mut self, cnt: usize) {
+        assert!(cnt <= self.len(), "cannot advance past `remaining`");
+        unsafe {
+            self.inc_start(cnt);
+        }
+    }
+
+    fn to_bytes(&mut self) -> crate::Bytes {
+        core::mem::replace(self, Bytes::new())
+    }
+}
+
+impl Deref for Bytes {
+    type Target = [u8];
+
+    #[inline]
+    fn deref(&self) -> &[u8] {
+        self.as_slice()
+    }
+}
+
 impl AsRef<[u8]> for Bytes {
     #[inline]
     fn as_ref(&self) -> &[u8] {
-        self.inner.as_ref()
-    }
-}
-
-impl ops::Deref for Bytes {
-    type Target = [u8];
-
-    #[inline]
-    fn deref(&self) -> &[u8] {
-        self.inner.as_ref()
-    }
-}
-
-impl From<BytesMut> for Bytes {
-    fn from(src: BytesMut) -> Bytes {
-        src.freeze()
-    }
-}
-
-impl From<Vec<u8>> for Bytes {
-    fn from(src: Vec<u8>) -> Bytes {
-        BytesMut::from(src).freeze()
-    }
-}
-
-impl From<String> for Bytes {
-    fn from(src: String) -> Bytes {
-        BytesMut::from(src).freeze()
-    }
-}
-
-impl<'a> From<&'a [u8]> for Bytes {
-    fn from(src: &'a [u8]) -> Bytes {
-        BytesMut::from(src).freeze()
-    }
-}
-
-impl<'a> From<&'a str> for Bytes {
-    fn from(src: &'a str) -> Bytes {
-        BytesMut::from(src).freeze()
-    }
-}
-
-impl FromIterator<u8> for BytesMut {
-    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
-        let iter = into_iter.into_iter();
-        let (min, maybe_max) = iter.size_hint();
-
-        let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min));
-
-        for i in iter {
-            out.reserve(1);
-            out.put(i);
-        }
-
-        out
-    }
-}
-
-impl FromIterator<u8> for Bytes {
-    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
-        BytesMut::from_iter(into_iter).freeze()
-    }
-}
-
-impl PartialEq for Bytes {
-    fn eq(&self, other: &Bytes) -> bool {
-        self.inner.as_ref() == other.inner.as_ref()
-    }
-}
-
-impl PartialOrd for Bytes {
-    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
-        self.inner.as_ref().partial_cmp(other.inner.as_ref())
-    }
-}
-
-impl Ord for Bytes {
-    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
-        self.inner.as_ref().cmp(other.inner.as_ref())
-    }
-}
-
-impl Eq for Bytes {
-}
-
-impl Default for Bytes {
-    #[inline]
-    fn default() -> Bytes {
-        Bytes::new()
-    }
-}
-
-impl fmt::Debug for Bytes {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
+        self.as_slice()
     }
 }
 
 impl hash::Hash for Bytes {
     fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
-        let s: &[u8] = self.as_ref();
-        s.hash(state);
+        self.as_slice().hash(state);
     }
 }
 
 impl Borrow<[u8]> for Bytes {
     fn borrow(&self) -> &[u8] {
-        self.as_ref()
+        self.as_slice()
     }
 }
 
 impl IntoIterator for Bytes {
     type Item = u8;
-    type IntoIter = Iter<Cursor<Bytes>>;
+    type IntoIter = IntoIter<Bytes>;
 
     fn into_iter(self) -> Self::IntoIter {
-        self.into_buf().iter()
+        IntoIter::new(self)
     }
 }
 
 impl<'a> IntoIterator for &'a Bytes {
-    type Item = u8;
-    type IntoIter = Iter<Cursor<&'a Bytes>>;
+    type Item = &'a u8;
+    type IntoIter = core::slice::Iter<'a, u8>;
 
     fn into_iter(self) -> Self::IntoIter {
-        self.into_buf().iter()
-    }
-}
-
-impl Extend<u8> for Bytes {
-    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
-        let iter = iter.into_iter();
-
-        let (lower, upper) = iter.size_hint();
-
-        // Avoid possible conversion into mut if there's nothing to add
-        if let Some(0) = upper {
-            return;
-        }
-
-        let mut bytes_mut = match mem::replace(self, Bytes::new()).try_mut() {
-            Ok(bytes_mut) => bytes_mut,
-            Err(bytes) => {
-                let mut bytes_mut = BytesMut::with_capacity(bytes.len() + lower);
-                bytes_mut.put_slice(&bytes);
-                bytes_mut
-            }
-        };
-
-        bytes_mut.extend(iter);
-
-        mem::replace(self, bytes_mut.freeze());
-    }
-}
-
-impl<'a> Extend<&'a u8> for Bytes {
-    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
-        self.extend(iter.into_iter().map(|b| *b))
+        self.as_slice().into_iter()
     }
 }
 
-/*
- *
- * ===== BytesMut =====
- *
- */
-
-impl BytesMut {
-    /// Creates a new `BytesMut` with the specified capacity.
-    ///
-    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
-    /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
-    /// then `BytesMut` will not allocate.
-    ///
-    /// It is important to note that this function does not specify the length
-    /// of the returned `BytesMut`, but only the capacity.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{BytesMut, BufMut};
-    ///
-    /// let mut bytes = BytesMut::with_capacity(64);
-    ///
-    /// // `bytes` contains no data, even though there is capacity
-    /// assert_eq!(bytes.len(), 0);
-    ///
-    /// bytes.put(&b"hello world"[..]);
-    ///
-    /// assert_eq!(&bytes[..], b"hello world");
-    /// ```
-    #[inline]
-    pub fn with_capacity(capacity: usize) -> BytesMut {
-        BytesMut {
-            inner: Inner::with_capacity(capacity),
-        }
-    }
-
-    /// Creates a new `BytesMut` with default capacity.
-    ///
-    /// Resulting object has length 0 and unspecified capacity.
-    /// This function does not allocate.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{BytesMut, BufMut};
-    ///
-    /// let mut bytes = BytesMut::new();
-    ///
-    /// assert_eq!(0, bytes.len());
-    ///
-    /// bytes.reserve(2);
-    /// bytes.put_slice(b"xy");
-    ///
-    /// assert_eq!(&b"xy"[..], &bytes[..]);
-    /// ```
-    #[inline]
-    pub fn new() -> BytesMut {
-        BytesMut::with_capacity(0)
-    }
-
-    /// Returns the number of bytes contained in this `BytesMut`.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let b = BytesMut::from(&b"hello"[..]);
-    /// assert_eq!(b.len(), 5);
-    /// ```
-    #[inline]
-    pub fn len(&self) -> usize {
-        self.inner.len()
-    }
-
-    /// Returns true if the `BytesMut` has a length of 0.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let b = BytesMut::with_capacity(64);
-    /// assert!(b.is_empty());
-    /// ```
-    #[inline]
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let b = BytesMut::with_capacity(64);
-    /// assert_eq!(b.capacity(), 64);
-    /// ```
-    #[inline]
-    pub fn capacity(&self) -> usize {
-        self.inner.capacity()
-    }
-
-    /// Converts `self` into an immutable `Bytes`.
-    ///
-    /// The conversion is zero cost and is used to indicate that the slice
-    /// referenced by the handle will no longer be mutated. Once the conversion
-    /// is done, the handle can be cloned and shared across threads.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{BytesMut, BufMut};
-    /// use std::thread;
-    ///
-    /// let mut b = BytesMut::with_capacity(64);
-    /// b.put("hello world");
-    /// let b1 = b.freeze();
-    /// let b2 = b1.clone();
-    ///
-    /// let th = thread::spawn(move || {
-    ///     assert_eq!(&b1[..], b"hello world");
-    /// });
-    ///
-    /// assert_eq!(&b2[..], b"hello world");
-    /// th.join().unwrap();
-    /// ```
-    #[inline]
-    pub fn freeze(self) -> Bytes {
-        Bytes { inner: self.inner }
-    }
-
-    /// Splits the bytes into two at the given index.
-    ///
-    /// Afterwards `self` contains elements `[0, at)`, and the returned
-    /// `BytesMut` contains elements `[at, capacity)`.
-    ///
-    /// This is an `O(1)` operation that just increases the reference count
-    /// and sets a few indices.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut a = BytesMut::from(&b"hello world"[..]);
-    /// let mut b = a.split_off(5);
-    ///
-    /// a[0] = b'j';
-    /// b[0] = b'!';
-    ///
-    /// assert_eq!(&a[..], b"jello");
-    /// assert_eq!(&b[..], b"!world");
-    /// ```
-    ///
-    /// # Panics
-    ///
-    /// Panics if `at > capacity`.
-    pub fn split_off(&mut self, at: usize) -> BytesMut {
-        BytesMut {
-            inner: self.inner.split_off(at),
-        }
-    }
-
-    /// Removes the bytes from the current view, returning them in a new
-    /// `BytesMut` handle.
-    ///
-    /// Afterwards, `self` will be empty, but will retain any additional
-    /// capacity that it had before the operation. This is identical to
-    /// `self.split_to(self.len())`.
-    ///
-    /// This is an `O(1)` operation that just increases the reference count and
-    /// sets a few indices.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{BytesMut, BufMut};
-    ///
-    /// let mut buf = BytesMut::with_capacity(1024);
-    /// buf.put(&b"hello world"[..]);
-    ///
-    /// let other = buf.take();
-    ///
-    /// assert!(buf.is_empty());
-    /// assert_eq!(1013, buf.capacity());
-    ///
-    /// assert_eq!(other, b"hello world"[..]);
-    /// ```
-    pub fn take(&mut self) -> BytesMut {
-        let len = self.len();
-        self.split_to(len)
-    }
-
-    #[deprecated(since = "0.4.1", note = "use take instead")]
-    #[doc(hidden)]
-    pub fn drain(&mut self) -> BytesMut {
-        self.take()
-    }
-
-    /// Splits the buffer into two at the given index.
-    ///
-    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
-    /// contains elements `[0, at)`.
-    ///
-    /// This is an `O(1)` operation that just increases the reference count and
-    /// sets a few indices.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut a = BytesMut::from(&b"hello world"[..]);
-    /// let mut b = a.split_to(5);
-    ///
-    /// a[0] = b'!';
-    /// b[0] = b'j';
-    ///
-    /// assert_eq!(&a[..], b"!world");
-    /// assert_eq!(&b[..], b"jello");
-    /// ```
-    ///
-    /// # Panics
-    ///
-    /// Panics if `at > len`.
-    pub fn split_to(&mut self, at: usize) -> BytesMut {
-        BytesMut {
-            inner: self.inner.split_to(at),
-        }
-    }
-
-    #[deprecated(since = "0.4.1", note = "use split_to instead")]
-    #[doc(hidden)]
-    pub fn drain_to(&mut self, at: usize) -> BytesMut {
-        self.split_to(at)
-    }
-
-    /// Shortens the buffer, keeping the first `len` bytes and dropping the
-    /// rest.
-    ///
-    /// If `len` is greater than the buffer's current length, this has no
-    /// effect.
-    ///
-    /// The [`split_off`] method can emulate `truncate`, but this causes the
-    /// excess bytes to be returned instead of dropped.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut buf = BytesMut::from(&b"hello world"[..]);
-    /// buf.truncate(5);
-    /// assert_eq!(buf, b"hello"[..]);
-    /// ```
-    ///
-    /// [`split_off`]: #method.split_off
-    pub fn truncate(&mut self, len: usize) {
-        self.inner.truncate(len);
-    }
-
-    /// Shortens the buffer, dropping the first `cnt` bytes and keeping the
-    /// rest.
-    ///
-    /// This is the same function as `Buf::advance`, and in the next breaking
-    /// release of `bytes`, this implementation will be removed in favor of
-    /// having `BytesMut` implement `Buf`.
-    ///
-    /// # Panics
-    ///
-    /// This function panics if `cnt` is greater than `self.len()`
-    #[inline]
-    pub fn advance(&mut self, cnt: usize) {
-        assert!(cnt <= self.len(), "cannot advance past `remaining`");
-        unsafe { self.inner.set_start(cnt); }
-    }
-
-    /// Clears the buffer, removing all data.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut buf = BytesMut::from(&b"hello world"[..]);
-    /// buf.clear();
-    /// assert!(buf.is_empty());
-    /// ```
-    pub fn clear(&mut self) {
-        self.truncate(0);
-    }
-
-    /// Resizes the buffer so that `len` is equal to `new_len`.
-    ///
-    /// If `new_len` is greater than `len`, the buffer is extended by the
-    /// difference with each additional byte set to `value`. If `new_len` is
-    /// less than `len`, the buffer is simply truncated.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut buf = BytesMut::new();
-    ///
-    /// buf.resize(3, 0x1);
-    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
-    ///
-    /// buf.resize(2, 0x2);
-    /// assert_eq!(&buf[..], &[0x1, 0x1]);
-    ///
-    /// buf.resize(4, 0x3);
-    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
-    /// ```
-    pub fn resize(&mut self, new_len: usize, value: u8) {
-        self.inner.resize(new_len, value);
-    }
-
-    /// Sets the length of the buffer.
-    ///
-    /// This will explicitly set the size of the buffer without actually
-    /// modifying the data, so it is up to the caller to ensure that the data
-    /// has been initialized.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut b = BytesMut::from(&b"hello world"[..]);
-    ///
-    /// unsafe {
-    ///     b.set_len(5);
-    /// }
-    ///
-    /// assert_eq!(&b[..], b"hello");
-    ///
-    /// unsafe {
-    ///     b.set_len(11);
-    /// }
-    ///
-    /// assert_eq!(&b[..], b"hello world");
-    /// ```
-    ///
-    /// # Panics
-    ///
-    /// This method will panic if `len` is out of bounds for the underlying
-    /// slice or if it comes after the `end` of the configured window.
-    pub unsafe fn set_len(&mut self, len: usize) {
-        self.inner.set_len(len)
-    }
-
-    /// Reserves capacity for at least `additional` more bytes to be inserted
-    /// into the given `BytesMut`.
-    ///
-    /// More than `additional` bytes may be reserved in order to avoid frequent
-    /// reallocations. A call to `reserve` may result in an allocation.
-    ///
-    /// Before allocating new buffer space, the function will attempt to reclaim
-    /// space in the existing buffer. If the current handle references a small
-    /// view in the original buffer and all other handles have been dropped,
-    /// and the requested capacity is less than or equal to the existing
-    /// buffer's capacity, then the current view will be copied to the front of
-    /// the buffer and the handle will take ownership of the full buffer.
-    ///
-    /// # Examples
-    ///
-    /// In the following example, a new buffer is allocated.
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut buf = BytesMut::from(&b"hello"[..]);
-    /// buf.reserve(64);
-    /// assert!(buf.capacity() >= 69);
-    /// ```
-    ///
-    /// In the following example, the existing buffer is reclaimed.
-    ///
-    /// ```
-    /// use bytes::{BytesMut, BufMut};
-    ///
-    /// let mut buf = BytesMut::with_capacity(128);
-    /// buf.put(&[0; 64][..]);
-    ///
-    /// let ptr = buf.as_ptr();
-    /// let other = buf.take();
-    ///
-    /// assert!(buf.is_empty());
-    /// assert_eq!(buf.capacity(), 64);
-    ///
-    /// drop(other);
-    /// buf.reserve(128);
-    ///
-    /// assert_eq!(buf.capacity(), 128);
-    /// assert_eq!(buf.as_ptr(), ptr);
-    /// ```
-    ///
-    /// # Panics
-    ///
-    /// Panics if the new capacity overflows `usize`.
-    pub fn reserve(&mut self, additional: usize) {
-        self.inner.reserve(additional)
-    }
-
-    /// Appends given bytes to this object.
-    ///
-    /// If this `BytesMut` object has not enough capacity, it is resized first.
-    /// So unlike `put_slice` operation, `extend_from_slice` does not panic.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut buf = BytesMut::with_capacity(0);
-    /// buf.extend_from_slice(b"aaabbb");
-    /// buf.extend_from_slice(b"cccddd");
-    ///
-    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
-    /// ```
-    pub fn extend_from_slice(&mut self, extend: &[u8]) {
-        self.reserve(extend.len());
-        self.put_slice(extend);
-    }
-
-    /// Combine splitted BytesMut objects back as contiguous.
-    ///
-    /// If `BytesMut` objects were not contiguous originally, they will be extended.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::BytesMut;
-    ///
-    /// let mut buf = BytesMut::with_capacity(64);
-    /// buf.extend_from_slice(b"aaabbbcccddd");
-    ///
-    /// let splitted = buf.split_off(6);
-    /// assert_eq!(b"aaabbb", &buf[..]);
-    /// assert_eq!(b"cccddd", &splitted[..]);
-    ///
-    /// buf.unsplit(splitted);
-    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
-    /// ```
-    pub fn unsplit(&mut self, other: BytesMut) {
-        let ptr;
-
-        if other.is_empty() {
-            return;
-        }
-
-        if self.is_empty() {
-            *self = other;
-            return;
-        }
-
-        unsafe {
-            ptr = self.inner.ptr.offset(self.inner.len as isize); 
-        }
-        if ptr == other.inner.ptr &&
-           self.inner.kind() == KIND_ARC &&
-           other.inner.kind() == KIND_ARC
-        {
-            debug_assert_eq!(self.inner.arc.load(Acquire),
-                             other.inner.arc.load(Acquire));
-            // Contiguous blocks, just combine directly
-            self.inner.len += other.inner.len;
-            self.inner.cap += other.inner.cap;
-        }
-        else {
-            self.extend_from_slice(&other);
-        }
-    }
-}
-
-impl BufMut for BytesMut {
-    #[inline]
-    fn remaining_mut(&self) -> usize {
-        self.capacity() - self.len()
-    }
-
-    #[inline]
-    unsafe fn advance_mut(&mut self, cnt: usize) {
-        let new_len = self.len() + cnt;
-
-        // This call will panic if `cnt` is too big
-        self.inner.set_len(new_len);
-    }
-
-    #[inline]
-    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
-        let len = self.len();
-
-        // This will never panic as `len` can never become invalid
-        &mut self.inner.as_raw()[len..]
-    }
-
-    #[inline]
-    fn put_slice(&mut self, src: &[u8]) {
-        assert!(self.remaining_mut() >= src.len());
-
-        let len = src.len();
-
-        unsafe {
-            self.bytes_mut()[..len].copy_from_slice(src);
-            self.advance_mut(len);
-        }
-    }
-
-    #[inline]
-    fn put_u8(&mut self, n: u8) {
-        self.inner.put_u8(n);
-    }
-
-    #[inline]
-    fn put_i8(&mut self, n: i8) {
-        self.put_u8(n as u8);
-    }
-}
-
-impl IntoBuf for BytesMut {
-    type Buf = Cursor<Self>;
-
-    fn into_buf(self) -> Self::Buf {
-        Cursor::new(self)
-    }
-}
-
-impl<'a> IntoBuf for &'a BytesMut {
-    type Buf = Cursor<&'a BytesMut>;
-
-    fn into_buf(self) -> Self::Buf {
-        Cursor::new(self)
-    }
-}
-
-impl AsRef<[u8]> for BytesMut {
-    #[inline]
-    fn as_ref(&self) -> &[u8] {
-        self.inner.as_ref()
-    }
-}
-
-impl ops::Deref for BytesMut {
-    type Target = [u8];
-
-    #[inline]
-    fn deref(&self) -> &[u8] {
-        self.as_ref()
-    }
-}
-
-impl AsMut<[u8]> for BytesMut {
-    fn as_mut(&mut self) -> &mut [u8] {
-        self.inner.as_mut()
-    }
-}
-
-impl ops::DerefMut for BytesMut {
-    #[inline]
-    fn deref_mut(&mut self) -> &mut [u8] {
-        self.inner.as_mut()
-    }
-}
-
-impl From<Vec<u8>> for BytesMut {
-    fn from(src: Vec<u8>) -> BytesMut {
-        BytesMut {
-            inner: Inner::from_vec(src),
-        }
-    }
-}
-
-impl From<String> for BytesMut {
-    fn from(src: String) -> BytesMut {
-        BytesMut::from(src.into_bytes())
-    }
-}
-
-impl<'a> From<&'a [u8]> for BytesMut {
-    fn from(src: &'a [u8]) -> BytesMut {
-        let len = src.len();
-
-        if len == 0 {
-            BytesMut::new()
-        } else if len <= INLINE_CAP {
-            unsafe {
-                let mut inner: Inner = mem::uninitialized();
-
-                // Set inline mask
-                inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
-                inner.set_inline_len(len);
-                inner.as_raw()[0..len].copy_from_slice(src);
-
-                BytesMut {
-                    inner: inner,
-                }
-            }
-        } else {
-            BytesMut::from(src.to_vec())
-        }
-    }
-}
-
-impl<'a> From<&'a str> for BytesMut {
-    fn from(src: &'a str) -> BytesMut {
-        BytesMut::from(src.as_bytes())
-    }
-}
-
-impl From<Bytes> for BytesMut {
-    fn from(src: Bytes) -> BytesMut {
-        src.try_mut()
-            .unwrap_or_else(|src| BytesMut::from(&src[..]))
-    }
-}
-
-impl PartialEq for BytesMut {
-    fn eq(&self, other: &BytesMut) -> bool {
-        self.inner.as_ref() == other.inner.as_ref()
-    }
-}
-
-impl PartialOrd for BytesMut {
-    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
-        self.inner.as_ref().partial_cmp(other.inner.as_ref())
-    }
-}
-
-impl Ord for BytesMut {
-    fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
-        self.inner.as_ref().cmp(other.inner.as_ref())
-    }
-}
-
-impl Eq for BytesMut {
-}
-
-impl Default for BytesMut {
-    #[inline]
-    fn default() -> BytesMut {
-        BytesMut::new()
-    }
-}
-
-impl fmt::Debug for BytesMut {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
-    }
-}
-
-impl hash::Hash for BytesMut {
-    fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
-        let s: &[u8] = self.as_ref();
-        s.hash(state);
-    }
-}
-
-impl Borrow<[u8]> for BytesMut {
-    fn borrow(&self) -> &[u8] {
-        self.as_ref()
-    }
-}
-
-impl BorrowMut<[u8]> for BytesMut {
-    fn borrow_mut(&mut self) -> &mut [u8] {
-        self.as_mut()
-    }
-}
-
-impl fmt::Write for BytesMut {
-    #[inline]
-    fn write_str(&mut self, s: &str) -> fmt::Result {
-        if self.remaining_mut() >= s.len() {
-            self.put_slice(s.as_bytes());
-            Ok(())
-        } else {
-            Err(fmt::Error)
-        }
-    }
-
-    #[inline]
-    fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result {
-        fmt::write(self, args)
-    }
-}
-
-impl Clone for BytesMut {
-    fn clone(&self) -> BytesMut {
-        BytesMut::from(&self[..])
-    }
-}
-
-impl IntoIterator for BytesMut {
-    type Item = u8;
-    type IntoIter = Iter<Cursor<BytesMut>>;
-
-    fn into_iter(self) -> Self::IntoIter {
-        self.into_buf().iter()
-    }
-}
-
-impl<'a> IntoIterator for &'a BytesMut {
-    type Item = u8;
-    type IntoIter = Iter<Cursor<&'a BytesMut>>;
-
-    fn into_iter(self) -> Self::IntoIter {
-        self.into_buf().iter()
-    }
-}
-
-impl Extend<u8> for BytesMut {
-    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
-        let iter = iter.into_iter();
-
-        let (lower, _) = iter.size_hint();
-        self.reserve(lower);
-
-        for b in iter {
-            unsafe {
-                self.bytes_mut()[0] = b;
-                self.advance_mut(1);
-            }
-        }
-    }
-}
-
-impl<'a> Extend<&'a u8> for BytesMut {
-    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
-        self.extend(iter.into_iter().map(|b| *b))
+impl FromIterator<u8> for Bytes {
+    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+        Vec::from_iter(into_iter).into()
     }
 }
 
-/*
- *
- * ===== Inner =====
- *
- */
-
-impl Inner {
-    #[inline]
-    fn from_static(bytes: &'static [u8]) -> Inner {
-        let ptr = bytes.as_ptr() as *mut u8;
-
-        Inner {
-            // `arc` won't ever store a pointer. Instead, use it to
-            // track the fact that the `Bytes` handle is backed by a
-            // static buffer.
-            arc: AtomicPtr::new(KIND_STATIC as *mut Shared),
-            ptr: ptr,
-            len: bytes.len(),
-            cap: bytes.len(),
-        }
-    }
-
-    #[inline]
-    fn from_vec(mut src: Vec<u8>) -> Inner {
-        let len = src.len();
-        let cap = src.capacity();
-        let ptr = src.as_mut_ptr();
-
-        mem::forget(src);
-
-        let original_capacity_repr = original_capacity_to_repr(cap);
-        let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
-
-        Inner {
-            arc: AtomicPtr::new(arc as *mut Shared),
-            ptr: ptr,
-            len: len,
-            cap: cap,
-        }
-    }
-
-    #[inline]
-    fn with_capacity(capacity: usize) -> Inner {
-        if capacity <= INLINE_CAP {
-            unsafe {
-                // Using uninitialized memory is ~30% faster
-                let mut inner: Inner = mem::uninitialized();
-                inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
-                inner
-            }
-        } else {
-            Inner::from_vec(Vec::with_capacity(capacity))
-        }
-    }
-
-    /// Return a slice for the handle's view into the shared buffer
-    #[inline]
-    fn as_ref(&self) -> &[u8] {
-        unsafe {
-            if self.is_inline() {
-                slice::from_raw_parts(self.inline_ptr(), self.inline_len())
-            } else {
-                slice::from_raw_parts(self.ptr, self.len)
-            }
-        }
-    }
-
-    /// Return a mutable slice for the handle's view into the shared buffer
-    #[inline]
-    fn as_mut(&mut self) -> &mut [u8] {
-        debug_assert!(!self.is_static());
-
-        unsafe {
-            if self.is_inline() {
-                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
-            } else {
-                slice::from_raw_parts_mut(self.ptr, self.len)
-            }
-        }
-    }
-
-    /// Return a mutable slice for the handle's view into the shared buffer
-    /// including potentially uninitialized bytes.
-    #[inline]
-    unsafe fn as_raw(&mut self) -> &mut [u8] {
-        debug_assert!(!self.is_static());
-
-        if self.is_inline() {
-            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
-        } else {
-            slice::from_raw_parts_mut(self.ptr, self.cap)
-        }
-    }
-
-    /// Insert a byte into the next slot and advance the len by 1.
-    #[inline]
-    fn put_u8(&mut self, n: u8) {
-        if self.is_inline() {
-            let len = self.inline_len();
-            assert!(len < INLINE_CAP);
-            unsafe {
-                *self.inline_ptr().offset(len as isize) = n;
-            }
-            self.set_inline_len(len + 1);
-        } else {
-            assert!(self.len < self.cap);
-            unsafe {
-                *self.ptr.offset(self.len as isize) = n;
-            }
-            self.len += 1;
-        }
-    }
-
-    #[inline]
-    fn len(&self) -> usize {
-        if self.is_inline() {
-            self.inline_len()
-        } else {
-            self.len
-        }
-    }
-
-    /// Pointer to the start of the inline buffer
-    #[inline]
-    unsafe fn inline_ptr(&self) -> *mut u8 {
-        (self as *const Inner as *mut Inner as *mut u8)
-            .offset(INLINE_DATA_OFFSET)
-    }
-
-    #[inline]
-    fn inline_len(&self) -> usize {
-        let p: &usize = unsafe { mem::transmute(&self.arc) };
-        (p & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
-    }
-
-    /// Set the length of the inline buffer. This is done by writing to the
-    /// least significant byte of the `arc` field.
-    #[inline]
-    fn set_inline_len(&mut self, len: usize) {
-        debug_assert!(len <= INLINE_CAP);
-        let p = self.arc.get_mut();
-        *p = ((*p as usize & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)) as _;
-    }
-
-    /// slice.
-    #[inline]
-    unsafe fn set_len(&mut self, len: usize) {
-        if self.is_inline() {
-            assert!(len <= INLINE_CAP);
-            self.set_inline_len(len);
-        } else {
-            assert!(len <= self.cap);
-            self.len = len;
-        }
-    }
-
-    #[inline]
-    fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    #[inline]
-    fn capacity(&self) -> usize {
-        if self.is_inline() {
-            INLINE_CAP
-        } else {
-            self.cap
-        }
-    }
-
-    fn split_off(&mut self, at: usize) -> Inner {
-        let mut other = unsafe { self.shallow_clone(true) };
-
-        unsafe {
-            other.set_start(at);
-            self.set_end(at);
-        }
-
-        return other
-    }
-
-    fn split_to(&mut self, at: usize) -> Inner {
-        let mut other = unsafe { self.shallow_clone(true) };
-
-        unsafe {
-            other.set_end(at);
-            self.set_start(at);
-        }
-
-        return other
-    }
-
-    fn truncate(&mut self, len: usize) {
-        if len <= self.len() {
-            unsafe { self.set_len(len); }
-        }
-    }
-
-    fn resize(&mut self, new_len: usize, value: u8) {
-        let len = self.len();
-        if new_len > len {
-            let additional = new_len - len;
-            self.reserve(additional);
-            unsafe {
-                let dst = self.as_raw()[len..].as_mut_ptr();
-                ptr::write_bytes(dst, value, additional);
-                self.set_len(new_len);
-            }
-        } else {
-            self.truncate(new_len);
-        }
-    }
-
-    unsafe fn set_start(&mut self, start: usize) {
-        // Setting the start to 0 is a no-op, so return early if this is the
-        // case.
-        if start == 0 {
-            return;
-        }
-
-        let kind = self.kind();
-
-        // Always check `inline` first, because if the handle is using inline
-        // data storage, all of the `Inner` struct fields will be gibberish.
-        if kind == KIND_INLINE {
-            assert!(start <= INLINE_CAP);
-
-            let len = self.inline_len();
-
-            if len <= start {
-                self.set_inline_len(0);
-            } else {
-                // `set_start` is essentially shifting data off the front of the
-                // view. Inlined buffers only track the length of the slice.
-                // So, to update the start, the data at the new starting point
-                // is copied to the beginning of the buffer.
-                let new_len = len - start;
-
-                let dst = self.inline_ptr();
-                let src = (dst as *const u8).offset(start as isize);
-
-                ptr::copy(src, dst, new_len);
-
-                self.set_inline_len(new_len);
-            }
-        } else {
-            assert!(start <= self.cap);
-
-            if kind == KIND_VEC {
-                // Setting the start when in vec representation is a little more
-                // complicated. First, we have to track how far ahead the
-                // "start" of the byte buffer from the beginning of the vec. We
-                // also have to ensure that we don't exceed the maximum shift.
-                let (mut pos, prev) = self.uncoordinated_get_vec_pos();
-                pos += start;
-
-                if pos <= MAX_VEC_POS {
-                    self.uncoordinated_set_vec_pos(pos, prev);
-                } else {
-                    // The repr must be upgraded to ARC. This will never happen
-                    // on 64 bit systems and will only happen on 32 bit systems
-                    // when shifting past 134,217,727 bytes. As such, we don't
-                    // worry too much about performance here.
-                    let _ = self.shallow_clone(true);
-                }
-            }
-
-            // Updating the start of the view is setting `ptr` to point to the
-            // new start and updating the `len` field to reflect the new length
-            // of the view.
-            self.ptr = self.ptr.offset(start as isize);
-
-            if self.len >= start {
-                self.len -= start;
-            } else {
-                self.len = 0;
-            }
-
-            self.cap -= start;
-        }
-    }
-
-    unsafe fn set_end(&mut self, end: usize) {
-        debug_assert!(self.is_shared());
-
-        // Always check `inline` first, because if the handle is using inline
-        // data storage, all of the `Inner` struct fields will be gibberish.
-        if self.is_inline() {
-            assert!(end <= INLINE_CAP);
-            let new_len = cmp::min(self.inline_len(), end);
-            self.set_inline_len(new_len);
-        } else {
-            assert!(end <= self.cap);
-
-            self.cap = end;
-            self.len = cmp::min(self.len, end);
-        }
-    }
-
-    /// Checks if it is safe to mutate the memory
-    fn is_mut_safe(&mut self) -> bool {
-        let kind = self.kind();
-
-        // Always check `inline` first, because if the handle is using inline
-        // data storage, all of the `Inner` struct fields will be gibberish.
-        if kind == KIND_INLINE {
-            // Inlined buffers can always be mutated as the data is never shared
-            // across handles.
-            true
-        } else if kind == KIND_VEC {
-            true
-        } else if kind == KIND_STATIC {
-            false
-        } else {
-            // Otherwise, the underlying buffer is potentially shared with other
-            // handles, so the ref_count needs to be checked.
-            unsafe { (**self.arc.get_mut()).is_unique() }
-        }
-    }
-
-    /// Increments the ref count. This should only be done if it is known that
-    /// it can be done safely. As such, this fn is not public, instead other
-    /// fns will use this one while maintaining the guarantees.
-    /// Parameter `mut_self` should only be set to `true` if caller holds
-    /// `&mut self` reference.
-    ///
-    /// "Safely" is defined as not exposing two `BytesMut` values that point to
-    /// the same byte window.
-    ///
-    /// This function is thread safe.
-    unsafe fn shallow_clone(&self, mut_self: bool) -> Inner {
-        // Always check `inline` first, because if the handle is using inline
-        // data storage, all of the `Inner` struct fields will be gibberish.
-        //
-        // Additionally, if kind is STATIC, then Arc is *never* changed, making
-        // it safe and faster to check for it now before an atomic acquire.
-
-        if self.is_inline_or_static() {
-            // In this case, a shallow_clone still involves copying the data.
-            let mut inner: Inner = mem::uninitialized();
-            ptr::copy_nonoverlapping(
-                self,
-                &mut inner,
-                1,
-            );
-            inner
-        } else {
-            self.shallow_clone_sync(mut_self)
-        }
-    }
-
+// impl Eq
 
-    #[cold]
-    unsafe fn shallow_clone_sync(&self, mut_self: bool) -> Inner {
-        // The function requires `&self`, this means that `shallow_clone`
-        // could be called concurrently.
-        //
-        // The first step is to load the value of `arc`. This will determine
-        // how to proceed. The `Acquire` ordering synchronizes with the
-        // `compare_and_swap` that comes later in this function. The goal is
-        // to ensure that if `arc` is currently set to point to a `Shared`,
-        // that the current thread acquires the associated memory.
-        let arc = self.arc.load(Acquire);
-        let kind = arc as usize & KIND_MASK;
-
-        if kind == KIND_ARC {
-            self.shallow_clone_arc(arc)
-        } else {
-            assert!(kind == KIND_VEC);
-            self.shallow_clone_vec(arc as usize, mut_self)
-        }
-    }
-
-    unsafe fn shallow_clone_arc(&self, arc: *mut Shared) -> Inner {
-        debug_assert!(arc as usize & KIND_MASK == KIND_ARC);
-
-        let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
-
-        if old_size == usize::MAX {
-            abort();
-        }
-
-        Inner {
-            arc: AtomicPtr::new(arc),
-            .. *self
-        }
-    }
-
-    #[cold]
-    unsafe fn shallow_clone_vec(&self, arc: usize, mut_self: bool) -> Inner {
-        // If  the buffer is still tracked in a `Vec<u8>`. It is time to
-        // promote the vec to an `Arc`. This could potentially be called
-        // concurrently, so some care must be taken.
-
-        debug_assert!(arc & KIND_MASK == KIND_VEC);
-
-        let original_capacity_repr =
-            (arc as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
-
-        // The vec offset cannot be concurrently mutated, so there
-        // should be no danger reading it.
-        let off = (arc as usize) >> VEC_POS_OFFSET;
-
-        // First, allocate a new `Shared` instance containing the
-        // `Vec` fields. It's important to note that `ptr`, `len`,
-        // and `cap` cannot be mutated without having `&mut self`.
-        // This means that these fields will not be concurrently
-        // updated and since the buffer hasn't been promoted to an
-        // `Arc`, those three fields still are the components of the
-        // vector.
-        let shared = Box::new(Shared {
-            vec: rebuild_vec(self.ptr, self.len, self.cap, off),
-            original_capacity_repr: original_capacity_repr,
-            // Initialize refcount to 2. One for this reference, and one
-            // for the new clone that will be returned from
-            // `shallow_clone`.
-            ref_count: AtomicUsize::new(2),
-        });
-
-        let shared = Box::into_raw(shared);
-
-        // The pointer should be aligned, so this assert should
-        // always succeed.
-        debug_assert!(0 == (shared as usize & 0b11));
-
-        // If there are no references to self in other threads,
-        // expensive atomic operations can be avoided.
-        if mut_self {
-            self.arc.store(shared, Relaxed);
-            return Inner {
-                arc: AtomicPtr::new(shared),
-                .. *self
-            };
-        }
-
-        // Try compare & swapping the pointer into the `arc` field.
-        // `Release` is used synchronize with other threads that
-        // will load the `arc` field.
-        //
-        // If the `compare_and_swap` fails, then the thread lost the
-        // race to promote the buffer to shared. The `Acquire`
-        // ordering will synchronize with the `compare_and_swap`
-        // that happened in the other thread and the `Shared`
-        // pointed to by `actual` will be visible.
-        let actual = self.arc.compare_and_swap(arc as *mut Shared, shared, AcqRel);
-
-        if actual as usize == arc {
-            // The upgrade was successful, the new handle can be
-            // returned.
-            return Inner {
-                arc: AtomicPtr::new(shared),
-                .. *self
-            };
-        }
-
-        // The upgrade failed, a concurrent clone happened. Release
-        // the allocation that was made in this thread, it will not
-        // be needed.
-        let shared = Box::from_raw(shared);
-        mem::forget(*shared);
-
-        // Buffer already promoted to shared storage, so increment ref
-        // count.
-        self.shallow_clone_arc(actual)
-    }
-
-    #[inline]
-    fn reserve(&mut self, additional: usize) {
-        let len = self.len();
-        let rem = self.capacity() - len;
-
-        if additional <= rem {
-            // The handle can already store at least `additional` more bytes, so
-            // there is no further work needed to be done.
-            return;
-        }
-
-        let kind = self.kind();
-
-        // Always check `inline` first, because if the handle is using inline
-        // data storage, all of the `Inner` struct fields will be gibberish.
-        if kind == KIND_INLINE {
-            let new_cap = len + additional;
-
-            // Promote to a vector
-            let mut v = Vec::with_capacity(new_cap);
-            v.extend_from_slice(self.as_ref());
-
-            self.ptr = v.as_mut_ptr();
-            self.len = v.len();
-            self.cap = v.capacity();
-
-            // Since the minimum capacity is `INLINE_CAP`, don't bother encoding
-            // the original capacity as INLINE_CAP
-            self.arc = AtomicPtr::new(KIND_VEC as *mut Shared);
-
-            mem::forget(v);
-            return;
-        }
-
-        if kind == KIND_VEC {
-            // If there's enough free space before the start of the buffer, then
-            // just copy the data backwards and reuse the already-allocated
-            // space.
-            //
-            // Otherwise, since backed by a vector, use `Vec::reserve`
-            unsafe {
-                let (off, prev) = self.uncoordinated_get_vec_pos();
-
-                // Only reuse space if we stand to gain at least capacity/2
-                // bytes of space back
-                if off >= additional && off >= (self.cap / 2) {
-                    // There's space - reuse it
-                    //
-                    // Just move the pointer back to the start after copying
-                    // data back.
-                    let base_ptr = self.ptr.offset(-(off as isize));
-                    ptr::copy(self.ptr, base_ptr, self.len);
-                    self.ptr = base_ptr;
-                    self.uncoordinated_set_vec_pos(0, prev);
-
-                    // Length stays constant, but since we moved backwards we
-                    // can gain capacity back.
-                    self.cap += off;
-                } else {
-                    // No space - allocate more
-                    let mut v = rebuild_vec(self.ptr, self.len, self.cap, off);
-                    v.reserve(additional);
-
-                    // Update the info
-                    self.ptr = v.as_mut_ptr().offset(off as isize);
-                    self.len = v.len() - off;
-                    self.cap = v.capacity() - off;
-
-                    // Drop the vec reference
-                    mem::forget(v);
-                }
-                return;
-            }
-        }
-
-        let arc = *self.arc.get_mut();
-
-        debug_assert!(kind == KIND_ARC);
-
-        // Reserving involves abandoning the currently shared buffer and
-        // allocating a new vector with the requested capacity.
-        //
-        // Compute the new capacity
-        let mut new_cap = len + additional;
-        let original_capacity;
-        let original_capacity_repr;
-
-        unsafe {
-            original_capacity_repr = (*arc).original_capacity_repr;
-            original_capacity = original_capacity_from_repr(original_capacity_repr);
-
-            // First, try to reclaim the buffer. This is possible if the current
-            // handle is the only outstanding handle pointing to the buffer.
-            if (*arc).is_unique() {
-                // This is the only handle to the buffer. It can be reclaimed.
-                // However, before doing the work of copying data, check to make
-                // sure that the vector has enough capacity.
-                let v = &mut (*arc).vec;
-
-                if v.capacity() >= new_cap {
-                    // The capacity is sufficient, reclaim the buffer
-                    let ptr = v.as_mut_ptr();
-
-                    ptr::copy(self.ptr, ptr, len);
-
-                    self.ptr = ptr;
-                    self.cap = v.capacity();
-
-                    return;
-                }
-
-                // The vector capacity is not sufficient. The reserve request is
-                // asking for more than the initial buffer capacity. Allocate more
-                // than requested if `new_cap` is not much bigger than the current
-                // capacity.
-                //
-                // There are some situations, using `reserve_exact` that the
-                // buffer capacity could be below `original_capacity`, so do a
-                // check.
-                new_cap = cmp::max(
-                    cmp::max(v.capacity() << 1, new_cap),
-                    original_capacity);
-            } else {
-                new_cap = cmp::max(new_cap, original_capacity);
-            }
-        }
-
-        // Create a new vector to store the data
-        let mut v = Vec::with_capacity(new_cap);
-
-        // Copy the bytes
-        v.extend_from_slice(self.as_ref());
-
-        // Release the shared handle. This must be done *after* the bytes are
-        // copied.
-        release_shared(arc);
-
-        // Update self
-        self.ptr = v.as_mut_ptr();
-        self.len = v.len();
-        self.cap = v.capacity();
-
-        let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
-
-        self.arc = AtomicPtr::new(arc as *mut Shared);
-
-        // Forget the vector handle
-        mem::forget(v);
-    }
-
-    /// Returns true if the buffer is stored inline
-    #[inline]
-    fn is_inline(&self) -> bool {
-        self.kind() == KIND_INLINE
-    }
-
-    #[inline]
-    fn is_inline_or_static(&self) -> bool {
-        // The value returned by `kind` isn't itself safe, but the value could
-        // inform what operations to take, and unsafely do something without
-        // synchronization.
-        //
-        // KIND_INLINE and KIND_STATIC will *never* change, so branches on that
-        // information is safe.
-        let kind = self.kind();
-        kind == KIND_INLINE || kind == KIND_STATIC
-    }
-
-    /// Used for `debug_assert` statements. &mut is used to guarantee that it is
-    /// safe to check VEC_KIND
-    #[inline]
-    fn is_shared(&mut self) -> bool {
-        match self.kind() {
-            KIND_VEC => false,
-            _ => true,
-        }
-    }
-
-    /// Used for `debug_assert` statements
-    #[inline]
-    fn is_static(&mut self) -> bool {
-        match self.kind() {
-            KIND_STATIC => true,
-            _ => false,
-        }
-    }
-
-    #[inline]
-    fn kind(&self) -> usize {
-        // This function is going to probably raise some eyebrows. The function
-        // returns true if the buffer is stored inline. This is done by checking
-        // the least significant bit in the `arc` field.
-        //
-        // Now, you may notice that `arc` is an `AtomicPtr` and this is
-        // accessing it as a normal field without performing an atomic load...
-        //
-        // Again, the function only cares about the least significant bit, and
-        // this bit is set when `Inner` is created and never changed after that.
-        // All platforms have atomic "word" operations and won't randomly flip
-        // bits, so even without any explicit atomic operations, reading the
-        // flag will be correct.
-        //
-        // This function is very critical performance wise as it is called for
-        // every operation. Performing an atomic load would mess with the
-        // compiler's ability to optimize. Simple benchmarks show up to a 10%
-        // slowdown using a `Relaxed` atomic load on x86.
-
-        #[cfg(target_endian = "little")]
-        #[inline]
-        fn imp(arc: &AtomicPtr<Shared>) -> usize {
-            unsafe {
-                let p: &u8 = mem::transmute(arc);
-                (*p as usize) & KIND_MASK
-            }
-        }
-
-        #[cfg(target_endian = "big")]
-        #[inline]
-        fn imp(arc: &AtomicPtr<Shared>) -> usize {
-            unsafe {
-                let p: &usize = mem::transmute(arc);
-                *p & KIND_MASK
-            }
-        }
-
-        imp(&self.arc)
-    }
-
-    #[inline]
-    fn uncoordinated_get_vec_pos(&mut self) -> (usize, usize) {
-        // Similar to above, this is a pretty crazed function. This should only
-        // be called when in the KIND_VEC mode. This + the &mut self argument
-        // guarantees that there is no possibility of concurrent calls to this
-        // function.
-        let prev = unsafe {
-            let p: &AtomicPtr<Shared> = &self.arc;
-            let p: &usize = mem::transmute(p);
-            *p
-        };
-
-        (prev >> VEC_POS_OFFSET, prev)
-    }
-
-    #[inline]
-    fn uncoordinated_set_vec_pos(&mut self, pos: usize, prev: usize) {
-        // Once more... crazy
-        debug_assert!(pos <= MAX_VEC_POS);
-
-        unsafe {
-            let p: &mut AtomicPtr<Shared> = &mut self.arc;
-            let p: &mut usize = mem::transmute(p);
-            *p = (pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK);
-        }
+impl PartialEq for Bytes {
+    fn eq(&self, other: &Bytes) -> bool {
+        self.as_slice() == other.as_slice()
     }
 }
 
-fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
-    unsafe {
-        let ptr = ptr.offset(-(off as isize));
-        len += off;
-        cap += off;
-
-        Vec::from_raw_parts(ptr, len, cap)
-    }
-}
-
-impl Drop for Inner {
-    fn drop(&mut self) {
-        let kind = self.kind();
-
-        if kind == KIND_VEC {
-            let (off, _) = self.uncoordinated_get_vec_pos();
-
-            // Vector storage, free the vector
-            let _ = rebuild_vec(self.ptr, self.len, self.cap, off);
-        } else if kind == KIND_ARC {
-            release_shared(*self.arc.get_mut());
-        }
-    }
-}
-
-fn release_shared(ptr: *mut Shared) {
-    // `Shared` storage... follow the drop steps from Arc.
-    unsafe {
-        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
-            return;
-        }
-
-        // This fence is needed to prevent reordering of use of the data and
-        // deletion of the data.  Because it is marked `Release`, the decreasing
-        // of the reference count synchronizes with this `Acquire` fence. This
-        // means that use of the data happens before decreasing the reference
-        // count, which happens before this fence, which happens before the
-        // deletion of the data.
-        //
-        // As explained in the [Boost documentation][1],
-        //
-        // > It is important to enforce any possible access to the object in one
-        // > thread (through an existing reference) to *happen before* deleting
-        // > the object in a different thread. This is achieved by a "release"
-        // > operation after dropping a reference (any access to the object
-        // > through this reference must obviously happened before), and an
-        // > "acquire" operation before deleting the object.
-        //
-        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-        atomic::fence(Acquire);
-
-        // Drop the data
-        Box::from_raw(ptr);
-    }
-}
-
-impl Shared {
-    fn is_unique(&self) -> bool {
-        // The goal is to check if the current handle is the only handle
-        // that currently has access to the buffer. This is done by
-        // checking if the `ref_count` is currently 1.
-        //
-        // The `Acquire` ordering synchronizes with the `Release` as
-        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
-        // operation guarantees that any mutations done in other threads
-        // are ordered before the `ref_count` is decremented. As such,
-        // this `Acquire` will guarantee that those mutations are
-        // visible to the current thread.
-        self.ref_count.load(Acquire) == 1
-    }
-}
-
-fn original_capacity_to_repr(cap: usize) -> usize {
-    let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
-    cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
-}
-
-fn original_capacity_from_repr(repr: usize) -> usize {
-    if repr == 0 {
-        return 0;
-    }
-
-    1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
-}
-
-#[test]
-fn test_original_capacity_to_repr() {
-    for &cap in &[0, 1, 16, 1000] {
-        assert_eq!(0, original_capacity_to_repr(cap));
-    }
-
-    for &cap in &[1024, 1025, 1100, 2000, 2047] {
-        assert_eq!(1, original_capacity_to_repr(cap));
-    }
-
-    for &cap in &[2048, 2049] {
-        assert_eq!(2, original_capacity_to_repr(cap));
-    }
-
-    // TODO: more
-
-    for &cap in &[65536, 65537, 68000, 1 << 17, 1 << 18, 1 << 20, 1 << 30] {
-        assert_eq!(7, original_capacity_to_repr(cap), "cap={}", cap);
-    }
-}
-
-#[test]
-fn test_original_capacity_from_repr() {
-    assert_eq!(0, original_capacity_from_repr(0));
-    assert_eq!(1024, original_capacity_from_repr(1));
-    assert_eq!(1024 * 2, original_capacity_from_repr(2));
-    assert_eq!(1024 * 4, original_capacity_from_repr(3));
-    assert_eq!(1024 * 8, original_capacity_from_repr(4));
-    assert_eq!(1024 * 16, original_capacity_from_repr(5));
-    assert_eq!(1024 * 32, original_capacity_from_repr(6));
-    assert_eq!(1024 * 64, original_capacity_from_repr(7));
-}
-
-unsafe impl Send for Inner {}
-unsafe impl Sync for Inner {}
-
-/*
- *
- * ===== PartialEq / PartialOrd =====
- *
- */
-
-impl PartialEq<[u8]> for BytesMut {
-    fn eq(&self, other: &[u8]) -> bool {
-        &**self == other
+impl PartialOrd for Bytes {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        self.as_slice().partial_cmp(other.as_slice())
     }
 }
 
-impl PartialOrd<[u8]> for BytesMut {
-    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
-        (**self).partial_cmp(other)
-    }
-}
-
-impl PartialEq<BytesMut> for [u8] {
-    fn eq(&self, other: &BytesMut) -> bool {
-        *other == *self
-    }
-}
-
-impl PartialOrd<BytesMut> for [u8] {
-    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
-        other.partial_cmp(self)
-    }
-}
-
-impl PartialEq<str> for BytesMut {
-    fn eq(&self, other: &str) -> bool {
-        &**self == other.as_bytes()
-    }
-}
-
-impl PartialOrd<str> for BytesMut {
-    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
-        (**self).partial_cmp(other.as_bytes())
-    }
-}
-
-impl PartialEq<BytesMut> for str {
-    fn eq(&self, other: &BytesMut) -> bool {
-        *other == *self
-    }
-}
-
-impl PartialOrd<BytesMut> for str {
-    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
-        other.partial_cmp(self)
-    }
-}
-
-impl PartialEq<Vec<u8>> for BytesMut {
-    fn eq(&self, other: &Vec<u8>) -> bool {
-        *self == &other[..]
-    }
-}
-
-impl PartialOrd<Vec<u8>> for BytesMut {
-    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
-        (**self).partial_cmp(&other[..])
-    }
-}
-
-impl PartialEq<BytesMut> for Vec<u8> {
-    fn eq(&self, other: &BytesMut) -> bool {
-        *other == *self
-    }
-}
-
-impl PartialOrd<BytesMut> for Vec<u8> {
-    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
-        other.partial_cmp(self)
+impl Ord for Bytes {
+    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
+        self.as_slice().cmp(other.as_slice())
     }
 }
 
-impl PartialEq<String> for BytesMut {
-    fn eq(&self, other: &String) -> bool {
-        *self == &other[..]
-    }
-}
-
-impl PartialOrd<String> for BytesMut {
-    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
-        (**self).partial_cmp(other.as_bytes())
-    }
-}
-
-impl PartialEq<BytesMut> for String {
-    fn eq(&self, other: &BytesMut) -> bool {
-        *other == *self
-    }
-}
-
-impl PartialOrd<BytesMut> for String {
-    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
-        other.partial_cmp(self)
-    }
-}
-
-impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
-    where BytesMut: PartialEq<T>
-{
-    fn eq(&self, other: &&'a T) -> bool {
-        *self == **other
-    }
-}
-
-impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
-    where BytesMut: PartialOrd<T>
-{
-    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
-        self.partial_cmp(*other)
-    }
-}
-
-impl<'a> PartialEq<BytesMut> for &'a [u8] {
-    fn eq(&self, other: &BytesMut) -> bool {
-        *other == *self
-    }
-}
-
-impl<'a> PartialOrd<BytesMut> for &'a [u8] {
-    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
-        other.partial_cmp(self)
-    }
-}
-
-impl<'a> PartialEq<BytesMut> for &'a str {
-    fn eq(&self, other: &BytesMut) -> bool {
-        *other == *self
-    }
-}
-
-impl<'a> PartialOrd<BytesMut> for &'a str {
-    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
-        other.partial_cmp(self)
-    }
-}
+impl Eq for Bytes {}
 
 impl PartialEq<[u8]> for Bytes {
     fn eq(&self, other: &[u8]) -> bool {
-        self.inner.as_ref() == other
+        self.as_slice() == other
     }
 }
 
 impl PartialOrd<[u8]> for Bytes {
     fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
-        self.inner.as_ref().partial_cmp(other)
+        self.as_slice().partial_cmp(other)
     }
 }
 
 impl PartialEq<Bytes> for [u8] {
     fn eq(&self, other: &Bytes) -> bool {
         *other == *self
     }
 }
@@ -2727,23 +583,23 @@ impl PartialEq<Bytes> for [u8] {
 impl PartialOrd<Bytes> for [u8] {
     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
         other.partial_cmp(self)
     }
 }
 
 impl PartialEq<str> for Bytes {
     fn eq(&self, other: &str) -> bool {
-        self.inner.as_ref() == other.as_bytes()
+        self.as_slice() == other.as_bytes()
     }
 }
 
 impl PartialOrd<str> for Bytes {
     fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
-        self.inner.as_ref().partial_cmp(other.as_bytes())
+        self.as_slice().partial_cmp(other.as_bytes())
     }
 }
 
 impl PartialEq<Bytes> for str {
     fn eq(&self, other: &Bytes) -> bool {
         *other == *self
     }
 }
@@ -2757,17 +613,17 @@ impl PartialOrd<Bytes> for str {
 impl PartialEq<Vec<u8>> for Bytes {
     fn eq(&self, other: &Vec<u8>) -> bool {
         *self == &other[..]
     }
 }
 
 impl PartialOrd<Vec<u8>> for Bytes {
     fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
-        self.inner.as_ref().partial_cmp(&other[..])
+        self.as_slice().partial_cmp(&other[..])
     }
 }
 
 impl PartialEq<Bytes> for Vec<u8> {
     fn eq(&self, other: &Bytes) -> bool {
         *other == *self
     }
 }
@@ -2781,51 +637,51 @@ impl PartialOrd<Bytes> for Vec<u8> {
 impl PartialEq<String> for Bytes {
     fn eq(&self, other: &String) -> bool {
         *self == &other[..]
     }
 }
 
 impl PartialOrd<String> for Bytes {
     fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
-        self.inner.as_ref().partial_cmp(other.as_bytes())
+        self.as_slice().partial_cmp(other.as_bytes())
     }
 }
 
 impl PartialEq<Bytes> for String {
     fn eq(&self, other: &Bytes) -> bool {
         *other == *self
     }
 }
 
 impl PartialOrd<Bytes> for String {
     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
         other.partial_cmp(self)
     }
 }
 
-impl<'a> PartialEq<Bytes> for &'a [u8] {
+impl PartialEq<Bytes> for &[u8] {
     fn eq(&self, other: &Bytes) -> bool {
         *other == *self
     }
 }
 
-impl<'a> PartialOrd<Bytes> for &'a [u8] {
+impl PartialOrd<Bytes> for &[u8] {
     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
         other.partial_cmp(self)
     }
 }
 
-impl<'a> PartialEq<Bytes> for &'a str {
+impl PartialEq<Bytes> for &str {
     fn eq(&self, other: &Bytes) -> bool {
         *other == *self
     }
 }
 
-impl<'a> PartialOrd<Bytes> for &'a str {
+impl PartialOrd<Bytes> for &str {
     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
         other.partial_cmp(self)
     }
 }
 
 impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
     where Bytes: PartialEq<T>
 {
@@ -2837,39 +693,312 @@ impl<'a, T: ?Sized> PartialEq<&'a T> for
 impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
     where Bytes: PartialOrd<T>
 {
     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
         self.partial_cmp(&**other)
     }
 }
 
-impl PartialEq<BytesMut> for Bytes
-{
-    fn eq(&self, other: &BytesMut) -> bool {
-        &other[..] == &self[..]
+// impl From
+
+impl Default for Bytes {
+    #[inline]
+    fn default() -> Bytes {
+        Bytes::new()
+    }
+}
+
+impl From<&'static [u8]> for Bytes {
+    fn from(slice: &'static [u8]) -> Bytes {
+        Bytes::from_static(slice)
+    }
+}
+
+impl From<&'static str> for Bytes {
+    fn from(slice: &'static str) -> Bytes {
+        Bytes::from_static(slice.as_bytes())
+    }
+}
+
+impl From<Vec<u8>> for Bytes {
+    fn from(vec: Vec<u8>) -> Bytes {
+        // into_boxed_slice doesn't return a heap allocation for empty vectors,
+        // so the pointer isn't aligned enough for the KIND_VEC stashing to
+        // work.
+        if vec.is_empty() {
+            return Bytes::new();
+        }
+
+        let slice = vec.into_boxed_slice();
+        let len = slice.len();
+        let ptr = slice.as_ptr();
+
+        assert!(
+            ptr as usize & KIND_VEC == 0,
+            "Vec pointer should not have LSB set: {:p}",
+            ptr,
+        );
+        drop(Box::into_raw(slice));
+
+        let data = ptr as usize | KIND_VEC;
+        Bytes {
+            ptr,
+            len,
+            data: AtomicPtr::new(data as *mut _),
+            vtable: &SHARED_VTABLE,
+        }
+    }
+}
+
+impl From<String> for Bytes {
+    fn from(s: String) -> Bytes {
+        Bytes::from(s.into_bytes())
     }
 }
 
-impl PartialEq<Bytes> for BytesMut
-{
-    fn eq(&self, other: &Bytes) -> bool {
-        &other[..] == &self[..]
+// ===== impl Vtable =====
+
+impl fmt::Debug for Vtable {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Vtable")
+            .field("clone", &(self.clone as *const ()))
+            .field("drop", &(self.drop as *const ()))
+            .finish()
+    }
+}
+
+// ===== impl StaticVtable =====
+
+const STATIC_VTABLE: Vtable = Vtable {
+    clone: static_clone,
+    drop: static_drop,
+};
+
+unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+    let slice = slice::from_raw_parts(ptr, len);
+    Bytes::from_static(slice)
+}
+
+unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
+    // nothing to drop for &'static [u8]
+}
+
+// ===== impl SharedVtable =====
+
+struct Shared {
+    // holds vec for drop, but otherwise doesn't access it
+    _vec: Vec<u8>,
+    ref_cnt: AtomicUsize,
+}
+
+static SHARED_VTABLE: Vtable = Vtable {
+    clone: shared_clone,
+    drop: shared_drop,
+};
+
+const KIND_ARC: usize = 0b0;
+const KIND_VEC: usize = 0b1;
+const KIND_MASK: usize = 0b1;
+
+unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+    let shared = data.load(Ordering::Acquire);
+    let kind = shared as usize & KIND_MASK;
+
+    if kind == KIND_ARC {
+        shallow_clone_arc(shared as _, ptr, len)
+    } else {
+        debug_assert_eq!(kind, KIND_VEC);
+        shallow_clone_vec(data, shared, ptr, len)
+    }
+}
+
+unsafe fn shared_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
+    let shared = *data.get_mut();
+    let kind = shared as usize & KIND_MASK;
+
+
+    if kind == KIND_ARC {
+        release_shared(shared as *mut Shared);
+    } else {
+        debug_assert_eq!(kind, KIND_VEC);
+
+        drop(rebuild_vec(shared, ptr, len));
     }
 }
 
-// While there is `std::process:abort`, it's only available in Rust 1.17, and
-// our minimum supported version is currently 1.15. So, this acts as an abort
-// by triggering a double panic, which always aborts in Rust.
-struct Abort;
+unsafe fn rebuild_vec(shared: *const (), offset: *const u8, len: usize) -> Vec<u8> {
+    debug_assert!(
+        shared as usize & KIND_MASK == KIND_VEC,
+        "rebuild_vec should have beeen called with KIND_VEC",
+    );
+    debug_assert!(
+        shared as usize & !KIND_MASK != 0,
+        "rebuild_vec should be called with non-null pointer: {:p}",
+        shared,
+    );
 
-impl Drop for Abort {
-    fn drop(&mut self) {
-        panic!();
+    let buf = (shared as usize & !KIND_MASK) as *mut u8;
+    let cap = (offset as usize - buf as usize) + len;
+    Vec::from_raw_parts(buf, cap, cap)
+}
+
+unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
+    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);
+
+    if old_size > usize::MAX >> 1 {
+        crate::abort();
+    }
+
+    Bytes {
+        ptr,
+        len,
+        data: AtomicPtr::new(shared as _),
+        vtable: &SHARED_VTABLE,
     }
 }
 
-#[inline(never)]
 #[cold]
-fn abort() {
-    let _a = Abort;
-    panic!();
+unsafe fn shallow_clone_vec(atom: &AtomicPtr<()>, ptr: *const (), offset: *const u8, len: usize) -> Bytes {
+    // If the buffer is still tracked in a `Vec<u8>`, it is time to
+    // promote the vec to an `Arc`. This could potentially be called
+    // concurrently, so some care must be taken.
+
+    debug_assert_eq!(ptr as usize & KIND_MASK, KIND_VEC);
+
+    // First, allocate a new `Shared` instance containing the
+    // `Vec` fields. It's important to note that `ptr`, `len`,
+    // and `cap` cannot be mutated without having `&mut self`.
+    // This means that these fields will not be concurrently
+    // updated and since the buffer hasn't been promoted to an
+    // `Arc`, those three fields still are the components of the
+    // vector.
+    let vec = rebuild_vec(ptr as *const (), offset, len);
+    let shared = Box::new(Shared {
+        _vec: vec,
+        // Initialize refcount to 2. One for this reference, and one
+        // for the new clone that will be returned from
+        // `shallow_clone`.
+        ref_cnt: AtomicUsize::new(2),
+    });
+
+    let shared = Box::into_raw(shared);
+
+    // The pointer should be aligned, so this assert should
+    // always succeed.
+    debug_assert!(0 == (shared as usize & KIND_MASK));
+
+    // Try compare & swapping the pointer into the `arc` field.
+    // `Release` is used to synchronize with other threads that
+    // will load the `arc` field.
+    //
+    // If the `compare_and_swap` fails, then the thread lost the
+    // race to promote the buffer to shared. The `Acquire`
+    // ordering will synchronize with the `compare_and_swap`
+    // that happened in the other thread and the `Shared`
+    // pointed to by `actual` will be visible.
+    let actual = atom.compare_and_swap(ptr as _, shared as _, Ordering::AcqRel);
+
+    if actual as usize == ptr as usize {
+        // The upgrade was successful, the new handle can be
+        // returned.
+        return Bytes {
+            ptr: offset,
+            len,
+            data: AtomicPtr::new(shared as _),
+            vtable: &SHARED_VTABLE,
+        };
+    }
+
+    // The upgrade failed, a concurrent clone happened. Release
+    // the allocation that was made in this thread, it will not
+    // be needed.
+    let shared = Box::from_raw(shared);
+    mem::forget(*shared);
+
+    // Buffer already promoted to shared storage, so increment ref
+    // count.
+    shallow_clone_arc(actual as _, offset, len)
 }
+
+unsafe fn release_shared(ptr: *mut Shared) {
+    // `Shared` storage... follow the drop steps from Arc.
+    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
+        return;
+    }
+
+    // This fence is needed to prevent reordering of use of the data and
+    // deletion of the data.  Because it is marked `Release`, the decreasing
+    // of the reference count synchronizes with this `Acquire` fence. This
+    // means that use of the data happens before decreasing the reference
+    // count, which happens before this fence, which happens before the
+    // deletion of the data.
+    //
+    // As explained in the [Boost documentation][1],
+    //
+    // > It is important to enforce any possible access to the object in one
+    // > thread (through an existing reference) to *happen before* deleting
+    // > the object in a different thread. This is achieved by a "release"
+    // > operation after dropping a reference (any access to the object
+    // > through this reference must obviously happened before), and an
+    // > "acquire" operation before deleting the object.
+    //
+    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+    atomic::fence(Ordering::Acquire);
+
+    // Drop the data
+    Box::from_raw(ptr);
+}
+
+// compile-fails
+
+/// ```compile_fail
+/// use bytes::Bytes;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = Bytes::from("hello world");
+///     b1.split_to(6);
+/// }
+/// ```
+fn _split_to_must_use() {}
+
+/// ```compile_fail
+/// use bytes::Bytes;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = Bytes::from("hello world");
+///     b1.split_off(6);
+/// }
+/// ```
+fn _split_off_must_use() {}
+
+// fuzz tests
+#[cfg(all(test, loom))]
+mod fuzz {
+    use std::sync::Arc;
+    use loom::thread;
+
+    use super::Bytes;
+    #[test]
+    fn bytes_cloning_vec() {
+        loom::model(|| {
+            let a = Bytes::from(b"abcdefgh".to_vec());
+            let addr = a.as_ptr() as usize;
+
+            // test the Bytes::clone is Sync by putting it in an Arc
+            let a1 = Arc::new(a);
+            let a2 = a1.clone();
+
+            let t1 = thread::spawn(move || {
+                let b: Bytes = (*a1).clone();
+                assert_eq!(b.as_ptr() as usize, addr);
+            });
+
+            let t2 = thread::spawn(move || {
+                let b: Bytes = (*a2).clone();
+                assert_eq!(b.as_ptr() as usize, addr);
+            });
+
+            t1.join().unwrap();
+            t2.join().unwrap();
+        });
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bytes/src/bytes_mut.rs
@@ -0,0 +1,1533 @@
+use core::{cmp, fmt, hash, isize, slice, usize};
+use core::mem::{self, ManuallyDrop};
+use core::ops::{Deref, DerefMut};
+use core::ptr::{self, NonNull};
+use core::iter::{FromIterator, Iterator};
+
+use alloc::{vec::Vec, string::String, boxed::Box, borrow::{Borrow, BorrowMut}};
+
+use crate::{Bytes, Buf, BufMut};
+use crate::bytes::Vtable;
+use crate::buf::IntoIter;
+use crate::debug;
+use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+
+/// A unique reference to a contiguous slice of memory.
+///
+/// `BytesMut` represents a unique view into a potentially shared memory region.
+/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
+/// mutate the memory. It is similar to a `Vec<u8>` but with less copies and
+/// allocations.
+///
+/// # Growth
+///
+/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
+/// necessary. However, explicitly reserving the required space up-front before
+/// a series of inserts will be more efficient.
+///
+/// # Examples
+///
+/// ```
+/// use bytes::{BytesMut, BufMut};
+///
+/// let mut buf = BytesMut::with_capacity(64);
+///
+/// buf.put_u8(b'h');
+/// buf.put_u8(b'e');
+/// buf.put(&b"llo"[..]);
+///
+/// assert_eq!(&buf[..], b"hello");
+///
+/// // Freeze the buffer so that it can be shared
+/// let a = buf.freeze();
+///
+/// // This does not allocate, instead `b` points to the same memory.
+/// let b = a.clone();
+///
+/// assert_eq!(&a[..], b"hello");
+/// assert_eq!(&b[..], b"hello");
+/// ```
+pub struct BytesMut {
+    ptr: NonNull<u8>,
+    len: usize,
+    cap: usize,
+    data: *mut Shared,
+}
+
+// Thread-safe reference-counted container for the shared storage. This is
+// mostly the same as `core::sync::Arc` but without the weak counter. The ref
+// counting fns are based on the ones found in `std`.
+//
+// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends
+// up making the overall code simpler and easier to reason about. This is due to
+// some of the logic around setting `Inner::arc` and other ways the `arc` field
+// is used. Using `Arc` ended up requiring a number of funky transmutes and
+// other shenanigans to make it work.
+struct Shared {
+    vec: Vec<u8>,
+    original_capacity_repr: usize,
+    ref_count: AtomicUsize,
+}
+
+// Buffer storage strategy flags.
+const KIND_ARC: usize = 0b0;
+const KIND_VEC: usize = 0b1;
+const KIND_MASK: usize = 0b1;
+
+// The max original capacity value. Any `Bytes` allocated with a greater initial
+// capacity will default to this.
+const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
+// The original capacity algorithm will not take effect unless the originally
+// allocated capacity was at least 1kb in size.
+const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
+// The original capacity is stored in powers of 2 starting at 1kb to a max of
+// 64kb. Representing it as such requires only 3 bits of storage.
+const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
+const ORIGINAL_CAPACITY_OFFSET: usize = 2;
+
+// When the storage is in the `Vec` representation, the pointer can be advanced
+// at most this value. This is due to the amount of storage available to track
+// the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY
+// bits.
+const VEC_POS_OFFSET: usize = 5;
+const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
+const NOT_VEC_POS_MASK: usize = 0b11111;
+
+#[cfg(target_pointer_width = "64")]
+const PTR_WIDTH: usize = 64;
+#[cfg(target_pointer_width = "32")]
+const PTR_WIDTH: usize = 32;
+
+/*
+ *
+ * ===== BytesMut =====
+ *
+ */
+
+impl BytesMut {
+    /// Creates a new `BytesMut` with the specified capacity.
+    ///
+    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
+    /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
+    /// then `BytesMut` will not allocate.
+    ///
+    /// It is important to note that this function does not specify the length
+    /// of the returned `BytesMut`, but only the capacity.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut bytes = BytesMut::with_capacity(64);
+    ///
+    /// // `bytes` contains no data, even though there is capacity
+    /// assert_eq!(bytes.len(), 0);
+    ///
+    /// bytes.put(&b"hello world"[..]);
+    ///
+    /// assert_eq!(&bytes[..], b"hello world");
+    /// ```
+    #[inline]
+    pub fn with_capacity(capacity: usize) -> BytesMut {
+        BytesMut::from_vec(Vec::with_capacity(capacity))
+    }
+
+    /// Creates a new `BytesMut` with default capacity.
+    ///
+    /// Resulting object has length 0 and unspecified capacity.
+    /// This function does not allocate.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut bytes = BytesMut::new();
+    ///
+    /// assert_eq!(0, bytes.len());
+    ///
+    /// bytes.reserve(2);
+    /// bytes.put_slice(b"xy");
+    ///
+    /// assert_eq!(&b"xy"[..], &bytes[..]);
+    /// ```
+    #[inline]
+    pub fn new() -> BytesMut {
+        BytesMut::with_capacity(0)
+    }
+
+    /// Returns the number of bytes contained in this `BytesMut`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let b = BytesMut::from(&b"hello"[..]);
+    /// assert_eq!(b.len(), 5);
+    /// ```
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Returns true if the `BytesMut` has a length of 0.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let b = BytesMut::with_capacity(64);
+    /// assert!(b.is_empty());
+    /// ```
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len == 0
+    }
+
+    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let b = BytesMut::with_capacity(64);
+    /// assert_eq!(b.capacity(), 64);
+    /// ```
+    #[inline]
+    pub fn capacity(&self) -> usize {
+        self.cap
+    }
+
+    /// Converts `self` into an immutable `Bytes`.
+    ///
+    /// The conversion is zero cost and is used to indicate that the slice
+    /// referenced by the handle will no longer be mutated. Once the conversion
+    /// is done, the handle can be cloned and shared across threads.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    /// use std::thread;
+    ///
+    /// let mut b = BytesMut::with_capacity(64);
+    /// b.put(&b"hello world"[..]);
+    /// let b1 = b.freeze();
+    /// let b2 = b1.clone();
+    ///
+    /// let th = thread::spawn(move || {
+    ///     assert_eq!(&b1[..], b"hello world");
+    /// });
+    ///
+    /// assert_eq!(&b2[..], b"hello world");
+    /// th.join().unwrap();
+    /// ```
+    #[inline]
+    pub fn freeze(mut self) -> Bytes {
+        if self.kind() == KIND_VEC {
+            // Just re-use `Bytes` internal Vec vtable
+            unsafe {
+                let (off, _) = self.get_vec_pos();
+                let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
+                mem::forget(self);
+                vec.into()
+            }
+        } else {
+            debug_assert_eq!(self.kind(), KIND_ARC);
+
+            let ptr = self.ptr.as_ptr();
+            let len = self.len;
+            let data = AtomicPtr::new(self.data as _);
+            mem::forget(self);
+            unsafe {
+                Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
+            }
+        }
+    }
+
+    /// Splits the bytes into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[0, at)`, and the returned
+    /// `BytesMut` contains elements `[at, capacity)`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count
+    /// and sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut a = BytesMut::from(&b"hello world"[..]);
+    /// let mut b = a.split_off(5);
+    ///
+    /// a[0] = b'j';
+    /// b[0] = b'!';
+    ///
+    /// assert_eq!(&a[..], b"jello");
+    /// assert_eq!(&b[..], b"!world");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > capacity`.
+    #[must_use = "consider BytesMut::truncate if you don't need the other half"]
+    pub fn split_off(&mut self, at: usize) -> BytesMut {
+        assert!(at <= self.capacity());
+        unsafe {
+            let mut other = self.shallow_clone();
+            other.set_start(at);
+            self.set_end(at);
+            other
+        }
+    }
+
+    /// Removes the bytes from the current view, returning them in a new
+    /// `BytesMut` handle.
+    ///
+    /// Afterwards, `self` will be empty, but will retain any additional
+    /// capacity that it had before the operation. This is identical to
+    /// `self.split_to(self.len())`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count and
+    /// sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut buf = BytesMut::with_capacity(1024);
+    /// buf.put(&b"hello world"[..]);
+    ///
+    /// let other = buf.split();
+    ///
+    /// assert!(buf.is_empty());
+    /// assert_eq!(1013, buf.capacity());
+    ///
+    /// assert_eq!(other, b"hello world"[..]);
+    /// ```
+    #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
+    pub fn split(&mut self) -> BytesMut {
+        let len = self.len();
+        self.split_to(len)
+    }
+
+    /// Splits the buffer into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
+    /// contains elements `[0, at)`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count and
+    /// sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut a = BytesMut::from(&b"hello world"[..]);
+    /// let mut b = a.split_to(5);
+    ///
+    /// a[0] = b'!';
+    /// b[0] = b'j';
+    ///
+    /// assert_eq!(&a[..], b"!world");
+    /// assert_eq!(&b[..], b"jello");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > len`.
+    #[must_use = "consider BytesMut::advance if you don't need the other half"]
+    pub fn split_to(&mut self, at: usize) -> BytesMut {
+        assert!(at <= self.len());
+
+        unsafe {
+            let mut other = self.shallow_clone();
+            other.set_end(at);
+            self.set_start(at);
+            other
+        }
+    }
+
+    /// Shortens the buffer, keeping the first `len` bytes and dropping the
+    /// rest.
+    ///
+    /// If `len` is greater than the buffer's current length, this has no
+    /// effect.
+    ///
+    /// The [`split_off`] method can emulate `truncate`, but this causes the
+    /// excess bytes to be returned instead of dropped.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::from(&b"hello world"[..]);
+    /// buf.truncate(5);
+    /// assert_eq!(buf, b"hello"[..]);
+    /// ```
+    ///
+    /// [`split_off`]: #method.split_off
+    pub fn truncate(&mut self, len: usize) {
+        if len <= self.len() {
+            unsafe { self.set_len(len); }
+        }
+    }
+
+    /// Clears the buffer, removing all data.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::from(&b"hello world"[..]);
+    /// buf.clear();
+    /// assert!(buf.is_empty());
+    /// ```
+    pub fn clear(&mut self) {
+        self.truncate(0);
+    }
+
+    /// Resizes the buffer so that `len` is equal to `new_len`.
+    ///
+    /// If `new_len` is greater than `len`, the buffer is extended by the
+    /// difference with each additional byte set to `value`. If `new_len` is
+    /// less than `len`, the buffer is simply truncated.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::new();
+    ///
+    /// buf.resize(3, 0x1);
+    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
+    ///
+    /// buf.resize(2, 0x2);
+    /// assert_eq!(&buf[..], &[0x1, 0x1]);
+    ///
+    /// buf.resize(4, 0x3);
+    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
+    /// ```
+    pub fn resize(&mut self, new_len: usize, value: u8) {
+        let len = self.len();
+        if new_len > len {
+            let additional = new_len - len;
+            self.reserve(additional);
+            unsafe {
+                let dst = self.bytes_mut().as_mut_ptr();
+                ptr::write_bytes(dst, value, additional);
+                self.set_len(new_len);
+            }
+        } else {
+            self.truncate(new_len);
+        }
+    }
+
+    /// Sets the length of the buffer.
+    ///
+    /// This will explicitly set the size of the buffer without actually
+    /// modifying the data, so it is up to the caller to ensure that the data
+    /// has been initialized.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut b = BytesMut::from(&b"hello world"[..]);
+    ///
+    /// unsafe {
+    ///     b.set_len(5);
+    /// }
+    ///
+    /// assert_eq!(&b[..], b"hello");
+    ///
+    /// unsafe {
+    ///     b.set_len(11);
+    /// }
+    ///
+    /// assert_eq!(&b[..], b"hello world");
+    /// ```
+    pub unsafe fn set_len(&mut self, len: usize) {
+        debug_assert!(len <= self.cap);
+        self.len = len;
+    }
+
+    /// Reserves capacity for at least `additional` more bytes to be inserted
+    /// into the given `BytesMut`.
+    ///
+    /// More than `additional` bytes may be reserved in order to avoid frequent
+    /// reallocations. A call to `reserve` may result in an allocation.
+    ///
+    /// Before allocating new buffer space, the function will attempt to reclaim
+    /// space in the existing buffer. If the current handle references a small
+    /// view in the original buffer and all other handles have been dropped,
+    /// and the requested capacity is less than or equal to the existing
+    /// buffer's capacity, then the current view will be copied to the front of
+    /// the buffer and the handle will take ownership of the full buffer.
+    ///
+    /// # Examples
+    ///
+    /// In the following example, a new buffer is allocated.
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::from(&b"hello"[..]);
+    /// buf.reserve(64);
+    /// assert!(buf.capacity() >= 69);
+    /// ```
+    ///
+    /// In the following example, the existing buffer is reclaimed.
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut buf = BytesMut::with_capacity(128);
+    /// buf.put(&[0; 64][..]);
+    ///
+    /// let ptr = buf.as_ptr();
+    /// let other = buf.split();
+    ///
+    /// assert!(buf.is_empty());
+    /// assert_eq!(buf.capacity(), 64);
+    ///
+    /// drop(other);
+    /// buf.reserve(128);
+    ///
+    /// assert_eq!(buf.capacity(), 128);
+    /// assert_eq!(buf.as_ptr(), ptr);
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new capacity overflows `usize`.
+    #[inline]
+    pub fn reserve(&mut self, additional: usize) {
+        let len = self.len();
+        let rem = self.capacity() - len;
+
+        if additional <= rem {
+            // The handle can already store at least `additional` more bytes, so
+            // there is no further work needed to be done.
+            return;
+        }
+
+        self.reserve_inner(additional);
+    }
+
+    // In separate function to allow the short-circuits in `reserve` to
+    // be inline-able. This significantly helps performance.
+    fn reserve_inner(&mut self, additional: usize) {
+        let len = self.len();
+        let kind = self.kind();
+
+        if kind == KIND_VEC {
+            // If there's enough free space before the start of the buffer, then
+            // just copy the data backwards and reuse the already-allocated
+            // space.
+            //
+            // Otherwise, since backed by a vector, use `Vec::reserve`
+            unsafe {
+                let (off, prev) = self.get_vec_pos();
+
+                // Only reuse space if we stand to gain at least capacity/2
+                // bytes of space back
+                if off >= additional && off >= (self.cap / 2) {
+                    // There's space - reuse it
+                    //
+                    // Just move the pointer back to the start after copying
+                    // data back.
+                    let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
+                    ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
+                    self.ptr = vptr(base_ptr);
+                    self.set_vec_pos(0, prev);
+
+                    // Length stays constant, but since we moved backwards we
+                    // can gain capacity back.
+                    self.cap += off;
+                } else {
+                    // No space - allocate more
+                    let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+                    v.reserve(additional);
+
+                    // Update the info
+                    self.ptr = vptr(v.as_mut_ptr().offset(off as isize));
+                    self.len = v.len() - off;
+                    self.cap = v.capacity() - off;
+                }
+
+                return;
+            }
+        }
+
+        debug_assert_eq!(kind, KIND_ARC);
+        let shared: *mut Shared = self.data as _;
+
+
+        // Reserving involves abandoning the currently shared buffer and
+        // allocating a new vector with the requested capacity.
+        //
+        // Compute the new capacity
+        let mut new_cap = len.checked_add(additional).expect("overflow");
+
+        let original_capacity;
+        let original_capacity_repr;
+
+        unsafe {
+            original_capacity_repr = (*shared).original_capacity_repr;
+            original_capacity = original_capacity_from_repr(original_capacity_repr);
+
+            // First, try to reclaim the buffer. This is possible if the current
+            // handle is the only outstanding handle pointing to the buffer.
+            if (*shared).is_unique() {
+                // This is the only handle to the buffer. It can be reclaimed.
+                // However, before doing the work of copying data, check to make
+                // sure that the vector has enough capacity.
+                let v = &mut (*shared).vec;
+
+                if v.capacity() >= new_cap {
+                    // The capacity is sufficient, reclaim the buffer
+                    let ptr = v.as_mut_ptr();
+
+                    ptr::copy(self.ptr.as_ptr(), ptr, len);
+
+                    self.ptr = vptr(ptr);
+                    self.cap = v.capacity();
+
+                    return;
+                }
+
+                // The vector capacity is not sufficient. The reserve request is
+                // asking for more than the initial buffer capacity. Allocate more
+                // than requested if `new_cap` is not much bigger than the current
+                // capacity.
+                //
+                // There are some situations, using `reserve_exact` that the
+                // buffer capacity could be below `original_capacity`, so do a
+                // check.
+                let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+
+                new_cap = cmp::max(
+                    cmp::max(double, new_cap),
+                    original_capacity);
+            } else {
+                new_cap = cmp::max(new_cap, original_capacity);
+            }
+        }
+
+        // Create a new vector to store the data
+        let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
+
+        // Copy the bytes
+        v.extend_from_slice(self.as_ref());
+
+        // Release the shared handle. This must be done *after* the bytes are
+        // copied.
+        unsafe { release_shared(shared) };
+
+        // Update self
+        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+        self.data = data as _;
+        self.ptr = vptr(v.as_mut_ptr());
+        self.len = v.len();
+        self.cap = v.capacity();
+    }
+    /// Appends given bytes to this object.
+    ///
+    /// If this `BytesMut` object has not enough capacity, it is resized first.
+    /// So unlike `put_slice` operation, `extend_from_slice` does not panic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::with_capacity(0);
+    /// buf.extend_from_slice(b"aaabbb");
+    /// buf.extend_from_slice(b"cccddd");
+    ///
+    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+    /// ```
+    pub fn extend_from_slice(&mut self, extend: &[u8]) {
+        let cnt = extend.len();
+        self.reserve(cnt);